var/home/core/zuul-output/logs/kubelet.log
Dec 10 10:45:18 crc systemd[1]: Starting Kubernetes Kubelet... Dec 10 10:45:18 crc restorecon[4681]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c225,c458 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c138,c778 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 
Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c108,c511 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:45:18 crc 
restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 10:45:18 crc 
restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc 
restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc 
restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:45:18 
crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 
10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:18 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 
10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 10:45:19 crc 
restorecon[4681]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 
10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 
10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc 
restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947
Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 10:45:19 crc restorecon[4681]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 10:45:19 crc restorecon[4681]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Dec 10 10:45:20 crc kubenswrapper[4682]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 10 10:45:20 crc kubenswrapper[4682]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Dec 10 10:45:20 crc kubenswrapper[4682]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 10 10:45:20 crc kubenswrapper[4682]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 10 10:45:20 crc kubenswrapper[4682]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Dec 10 10:45:20 crc kubenswrapper[4682]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.191615 4682 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198727 4682 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198764 4682 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198779 4682 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198790 4682 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198800 4682 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198811 4682 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198822 4682 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198832 4682 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198841 4682 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198850 4682 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198859 4682 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198867 4682 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198889 4682 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198898 4682 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198907 4682 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198915 4682 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198924 4682 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198932 4682 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198940 4682 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198948 4682 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198957 4682 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198968 4682 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198978 4682 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198987 4682 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.198995 4682 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199004 4682 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199012 4682 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199021 4682 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199029 4682 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199037 4682 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199045 4682 feature_gate.go:330] unrecognized feature gate: Example
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199054 4682 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199063 4682 feature_gate.go:330] unrecognized feature gate: NewOLM
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199071 4682 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199079 4682 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199088 4682 feature_gate.go:330] unrecognized feature gate: SignatureStores
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199096 4682 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199106 4682 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199116 4682 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199125 4682 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199134 4682 feature_gate.go:330] unrecognized feature gate: OVNObservability
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199143 4682 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199153 4682 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199163 4682 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199171 4682 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199179 4682 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 10 10:45:20 crc
kubenswrapper[4682]: W1210 10:45:20.199189 4682 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199199 4682 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199207 4682 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199216 4682 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199224 4682 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199233 4682 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199241 4682 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199250 4682 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199258 4682 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199266 4682 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199274 4682 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199284 4682 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199293 4682 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199304 4682 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199314 4682 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199324 4682 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199334 4682 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199346 4682 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199355 4682 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199364 4682 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199372 4682 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199380 4682 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199389 4682 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199400 4682 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.199413 4682 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199631 4682 flags.go:64] FLAG: --address="0.0.0.0" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199655 4682 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199675 4682 flags.go:64] FLAG: --anonymous-auth="true" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199691 4682 flags.go:64] FLAG: --application-metrics-count-limit="100" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199706 4682 flags.go:64] FLAG: --authentication-token-webhook="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199719 4682 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199735 4682 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199750 4682 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199763 4682 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199775 4682 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199824 4682 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199838 4682 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199848 4682 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199859 4682 flags.go:64] FLAG: --cgroup-root="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199869 4682 flags.go:64] FLAG: --cgroups-per-qos="true" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199879 4682 flags.go:64] FLAG: --client-ca-file="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199888 4682 flags.go:64] FLAG: --cloud-config="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199899 4682 flags.go:64] FLAG: --cloud-provider="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199912 4682 flags.go:64] FLAG: --cluster-dns="[]" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199926 4682 flags.go:64] FLAG: --cluster-domain="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199936 4682 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199946 4682 flags.go:64] FLAG: --config-dir="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199955 4682 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199966 4682 flags.go:64] FLAG: --container-log-max-files="5" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199978 4682 flags.go:64] FLAG: --container-log-max-size="10Mi" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199988 4682 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.199998 4682 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200007 4682 flags.go:64] FLAG: --containerd-namespace="k8s.io" Dec 10 10:45:20 crc 
kubenswrapper[4682]: I1210 10:45:20.200018 4682 flags.go:64] FLAG: --contention-profiling="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200028 4682 flags.go:64] FLAG: --cpu-cfs-quota="true" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200038 4682 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200049 4682 flags.go:64] FLAG: --cpu-manager-policy="none" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200058 4682 flags.go:64] FLAG: --cpu-manager-policy-options="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200070 4682 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200081 4682 flags.go:64] FLAG: --enable-controller-attach-detach="true" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200090 4682 flags.go:64] FLAG: --enable-debugging-handlers="true" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200100 4682 flags.go:64] FLAG: --enable-load-reader="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200111 4682 flags.go:64] FLAG: --enable-server="true" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200121 4682 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200133 4682 flags.go:64] FLAG: --event-burst="100" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200143 4682 flags.go:64] FLAG: --event-qps="50" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200153 4682 flags.go:64] FLAG: --event-storage-age-limit="default=0" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200163 4682 flags.go:64] FLAG: --event-storage-event-limit="default=0" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200172 4682 flags.go:64] FLAG: --eviction-hard="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200184 4682 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200193 4682 flags.go:64] FLAG: --eviction-minimum-reclaim="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200203 4682 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200214 4682 flags.go:64] FLAG: --eviction-soft="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200224 4682 flags.go:64] FLAG: --eviction-soft-grace-period="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200233 4682 flags.go:64] FLAG: --exit-on-lock-contention="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200245 4682 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200254 4682 flags.go:64] FLAG: --experimental-mounter-path="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200264 4682 flags.go:64] FLAG: --fail-cgroupv1="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200274 4682 flags.go:64] FLAG: --fail-swap-on="true" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200283 4682 flags.go:64] FLAG: --feature-gates="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200295 4682 flags.go:64] FLAG: --file-check-frequency="20s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200305 4682 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200315 4682 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Dec 10 10:45:20 crc 
kubenswrapper[4682]: I1210 10:45:20.200326 4682 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200336 4682 flags.go:64] FLAG: --healthz-port="10248" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200347 4682 flags.go:64] FLAG: --help="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200358 4682 flags.go:64] FLAG: --hostname-override="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200368 4682 flags.go:64] FLAG: --housekeeping-interval="10s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200378 4682 flags.go:64] FLAG: --http-check-frequency="20s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200389 4682 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200400 4682 flags.go:64] FLAG: --image-credential-provider-config="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200409 4682 flags.go:64] FLAG: --image-gc-high-threshold="85" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200419 4682 flags.go:64] FLAG: --image-gc-low-threshold="80" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200428 4682 flags.go:64] FLAG: --image-service-endpoint="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200438 4682 flags.go:64] FLAG: --kernel-memcg-notification="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200448 4682 flags.go:64] FLAG: --kube-api-burst="100" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200457 4682 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200510 4682 flags.go:64] FLAG: --kube-api-qps="50" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200523 4682 flags.go:64] FLAG: --kube-reserved="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200535 4682 flags.go:64] FLAG: --kube-reserved-cgroup="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200564 4682 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200580 4682 flags.go:64] FLAG: --kubelet-cgroups="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200591 4682 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200604 4682 flags.go:64] FLAG: --lock-file="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200617 4682 flags.go:64] FLAG: --log-cadvisor-usage="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200641 4682 flags.go:64] FLAG: --log-flush-frequency="5s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200661 4682 flags.go:64] FLAG: --log-json-info-buffer-size="0" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200682 4682 flags.go:64] FLAG: --log-json-split-stream="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200694 4682 flags.go:64] FLAG: --log-text-info-buffer-size="0" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200705 4682 flags.go:64] FLAG: --log-text-split-stream="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200717 4682 flags.go:64] FLAG: --logging-format="text" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200729 4682 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200741 4682 flags.go:64] FLAG: --make-iptables-util-chains="true" Dec 10 10:45:20 crc 
kubenswrapper[4682]: I1210 10:45:20.200751 4682 flags.go:64] FLAG: --manifest-url="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200760 4682 flags.go:64] FLAG: --manifest-url-header="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200773 4682 flags.go:64] FLAG: --max-housekeeping-interval="15s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200782 4682 flags.go:64] FLAG: --max-open-files="1000000" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200793 4682 flags.go:64] FLAG: --max-pods="110" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200803 4682 flags.go:64] FLAG: --maximum-dead-containers="-1" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200812 4682 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200820 4682 flags.go:64] FLAG: --memory-manager-policy="None" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200829 4682 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200839 4682 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200848 4682 flags.go:64] FLAG: --node-ip="192.168.126.11" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200857 4682 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200882 4682 flags.go:64] FLAG: --node-status-max-images="50" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200891 4682 flags.go:64] FLAG: --node-status-update-frequency="10s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200901 4682 flags.go:64] FLAG: --oom-score-adj="-999" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200910 4682 flags.go:64] FLAG: --pod-cidr="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200919 4682 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200932 4682 flags.go:64] FLAG: --pod-manifest-path="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200942 4682 flags.go:64] FLAG: --pod-max-pids="-1" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200950 4682 flags.go:64] FLAG: --pods-per-core="0" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200959 4682 flags.go:64] FLAG: --port="10250" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200968 4682 flags.go:64] FLAG: --protect-kernel-defaults="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200977 4682 flags.go:64] FLAG: --provider-id="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200985 4682 flags.go:64] FLAG: --qos-reserved="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.200995 4682 flags.go:64] FLAG: --read-only-port="10255" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201004 4682 flags.go:64] FLAG: --register-node="true" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201014 4682 flags.go:64] FLAG: --register-schedulable="true" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201023 4682 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201039 4682 flags.go:64] FLAG: --registry-burst="10" Dec 10 10:45:20 crc 
kubenswrapper[4682]: I1210 10:45:20.201048 4682 flags.go:64] FLAG: --registry-qps="5" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201057 4682 flags.go:64] FLAG: --reserved-cpus="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201066 4682 flags.go:64] FLAG: --reserved-memory="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201077 4682 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201086 4682 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201096 4682 flags.go:64] FLAG: --rotate-certificates="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201105 4682 flags.go:64] FLAG: --rotate-server-certificates="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201113 4682 flags.go:64] FLAG: --runonce="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201122 4682 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201131 4682 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201141 4682 flags.go:64] FLAG: --seccomp-default="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201150 4682 flags.go:64] FLAG: --serialize-image-pulls="true" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201159 4682 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201168 4682 flags.go:64] FLAG: --storage-driver-db="cadvisor" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201177 4682 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201186 4682 flags.go:64] FLAG: --storage-driver-password="root" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201195 4682 flags.go:64] FLAG: --storage-driver-secure="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201204 4682 flags.go:64] FLAG: --storage-driver-table="stats" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201213 4682 flags.go:64] FLAG: --storage-driver-user="root" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201223 4682 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201232 4682 flags.go:64] FLAG: --sync-frequency="1m0s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201241 4682 flags.go:64] FLAG: --system-cgroups="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201250 4682 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201264 4682 flags.go:64] FLAG: --system-reserved-cgroup="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201272 4682 flags.go:64] FLAG: --tls-cert-file="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201281 4682 flags.go:64] FLAG: --tls-cipher-suites="[]" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201293 4682 flags.go:64] FLAG: --tls-min-version="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201302 4682 flags.go:64] FLAG: --tls-private-key-file="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201311 4682 flags.go:64] FLAG: --topology-manager-policy="none" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201321 4682 flags.go:64] FLAG: --topology-manager-policy-options="" Dec 10 10:45:20 crc kubenswrapper[4682]: 
I1210 10:45:20.201330 4682 flags.go:64] FLAG: --topology-manager-scope="container" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201339 4682 flags.go:64] FLAG: --v="2" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201352 4682 flags.go:64] FLAG: --version="false" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201364 4682 flags.go:64] FLAG: --vmodule="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201374 4682 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.201384 4682 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201650 4682 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201688 4682 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201694 4682 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201699 4682 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201705 4682 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201710 4682 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201717 4682 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201723 4682 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201735 4682 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201741 4682 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201746 4682 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201751 4682 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201760 4682 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201769 4682 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201785 4682 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201793 4682 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201800 4682 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201807 4682 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201814 4682 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201822 4682 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201829 4682 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201836 4682 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201841 4682 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201848 4682 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201854 4682 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201860 4682 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201871 4682 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201877 4682 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201883 4682 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201890 4682 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201896 4682 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201902 4682 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201908 4682 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201914 4682 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201921 4682 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201926 4682 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201933 4682 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201939 4682 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201945 4682 feature_gate.go:330] 
unrecognized feature gate: AdminNetworkPolicy Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201951 4682 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201962 4682 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201968 4682 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201974 4682 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201982 4682 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201990 4682 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.201999 4682 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202006 4682 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202014 4682 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202021 4682 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202028 4682 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202035 4682 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202042 4682 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202048 4682 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202055 4682 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202061 4682 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202070 4682 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202076 4682 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202084 4682 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202095 4682 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202101 4682 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202108 4682 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202115 4682 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202122 4682 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202128 
4682 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202135 4682 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202142 4682 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202148 4682 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202154 4682 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202161 4682 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202167 4682 feature_gate.go:330] unrecognized feature gate: Example Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.202173 4682 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.202184 4682 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.218508 4682 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.218572 4682 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218724 4682 feature_gate.go:330] unrecognized feature gate: Example Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218747 4682 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218758 4682 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218768 4682 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218776 4682 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218785 4682 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218792 4682 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218801 4682 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218808 4682 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218816 4682 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218825 4682 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218834 4682 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218843 4682 feature_gate.go:330] 
unrecognized feature gate: BareMetalLoadBalancer Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218851 4682 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218860 4682 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218868 4682 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218876 4682 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218884 4682 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218892 4682 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218899 4682 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218907 4682 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218915 4682 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218923 4682 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218932 4682 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218939 4682 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218950 4682 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218962 4682 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218972 4682 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218981 4682 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218989 4682 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.218997 4682 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219005 4682 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219015 4682 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219023 4682 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219034 4682 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219044 4682 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219052 4682 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219061 4682 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219069 4682 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219077 4682 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219084 4682 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219092 4682 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219100 4682 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219108 4682 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219116 4682 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219126 4682 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219135 4682 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219143 4682 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219152 4682 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219163 4682 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219174 4682 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219183 4682 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219194 4682 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219202 4682 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219211 4682 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219220 4682 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219228 4682 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219237 4682 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219245 4682 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219253 4682 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219261 4682 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219269 4682 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219277 4682 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219285 4682 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219293 4682 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219302 4682 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219312 4682 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219323 4682 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219331 4682 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219341 4682 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219349 4682 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.219362 4682 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219627 4682 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219644 4682 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219654 4682 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219663 4682 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219671 4682 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219679 4682 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219687 4682 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219695 4682 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219704 4682 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219711 4682 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219719 4682 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219727 4682 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219735 4682 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219743 4682 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219751 4682 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219758 4682 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219766 4682 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 10 10:45:20 crc 
kubenswrapper[4682]: W1210 10:45:20.219774 4682 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219783 4682 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219790 4682 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219798 4682 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219806 4682 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219814 4682 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219822 4682 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219830 4682 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219839 4682 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219847 4682 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219855 4682 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219864 4682 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219872 4682 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219882 4682 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219892 4682 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219900 4682 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219910 4682 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219919 4682 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219928 4682 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219936 4682 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219944 4682 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219954 4682 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219963 4682 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219971 4682 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219979 4682 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219986 4682 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.219995 4682 feature_gate.go:330] unrecognized feature gate: Example Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220003 4682 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220011 4682 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220018 4682 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220026 4682 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220034 4682 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220042 4682 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220050 4682 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220059 4682 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220068 4682 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220076 4682 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220084 4682 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220094 4682 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220105 4682 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220115 4682 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220125 4682 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220136 4682 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220144 4682 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220153 4682 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220161 4682 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220170 4682 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220177 4682 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220186 4682 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220194 4682 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220204 4682 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220213 4682 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220222 4682 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.220231 4682 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.220244 4682 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.220862 4682 server.go:940] "Client rotation is on, will bootstrap in background" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.229012 4682 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.229187 4682 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.230237 4682 server.go:997] "Starting client certificate rotation" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.230288 4682 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.230522 4682 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-24 10:01:45.58633756 +0000 UTC Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.230644 4682 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 335h16m25.355697421s for next certificate rotation Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.242723 4682 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.245363 4682 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.259585 4682 log.go:25] "Validated CRI v1 runtime API" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.280180 4682 log.go:25] "Validated CRI v1 image API" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.281926 4682 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.286528 4682 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-12-10-10-40-35-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.286558 4682 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:41 fsType:tmpfs blockSize:0}] Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.300978 4682 manager.go:217] Machine: {Timestamp:2025-12-10 10:45:20.298222537 +0000 UTC m=+0.618433307 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:1e536030-2097-46d6-9de1-5c5492a935f2 BootID:f31d3580-1486-4f05-9f92-ad8676a17c6a Filesystems:[{Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:41 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 
DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:8b:8b:52 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:8b:8b:52 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:df:84:6c Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:1a:96:16 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:8a:50:16 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:e6:de:28 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:e6:8c:b3:a5:8e:3e Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:42:c3:63:20:cb:ca Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] 
SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.302865 4682 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.303408 4682 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.304192 4682 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.304617 4682 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.304679 4682 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.305375 4682 topology_manager.go:138] "Creating topology manager with none policy" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.305403 4682 container_manager_linux.go:303] "Creating device plugin manager" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.305797 4682 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.305870 4682 server.go:66] "Creating device plugin registration server" version="v1beta1" 
socket="/var/lib/kubelet/device-plugins/kubelet.sock" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.306538 4682 state_mem.go:36] "Initialized new in-memory state store" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.306819 4682 server.go:1245] "Using root directory" path="/var/lib/kubelet" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.308098 4682 kubelet.go:418] "Attempting to sync node with API server" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.308136 4682 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.308166 4682 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.308191 4682 kubelet.go:324] "Adding apiserver pod source" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.308214 4682 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.310641 4682 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.313304 4682 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.317036 4682 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.317719 4682 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.317859 4682 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.317932 4682 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.317965 4682 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.317979 4682 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.317991 4682 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.318010 4682 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.318022 4682 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.318033 4682 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.317952 4682 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: 
connect: connection refused Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.318131 4682 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.318053 4682 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.318257 4682 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.318281 4682 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.318299 4682 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.318311 4682 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.318677 4682 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.319348 4682 server.go:1280] "Started kubelet" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.319663 4682 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.319772 4682 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.320278 4682 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Dec 10 10:45:20 crc systemd[1]: Started Kubernetes Kubelet. 
Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.322543 4682 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.322581 4682 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.322675 4682 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.323318 4682 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 18:25:24.359828449 +0000 UTC Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.323392 4682 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 31h40m4.036441785s for next certificate rotation Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.323571 4682 volume_manager.go:287] "The desired_state_of_world populator starts" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.323590 4682 volume_manager.go:289] "Starting Kubelet Volume Manager" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.323648 4682 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.323848 4682 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.325105 4682 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.325174 4682 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.325774 4682 factory.go:153] Registering CRI-O factory Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.325865 4682 factory.go:221] Registration of the crio container factory successfully Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.325989 4682 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.326091 4682 factory.go:55] Registering systemd factory Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.326162 4682 factory.go:221] Registration of the systemd container factory successfully Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.326292 4682 factory.go:103] Registering Raw factory Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.326376 4682 manager.go:1196] Started watching for new ooms in manager Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.329772 4682 manager.go:319] Starting recovery of all containers Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.332213 4682 
server.go:460] "Adding debug handlers to kubelet server" Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.334584 4682 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="200ms" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.336204 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.336293 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.336308 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.336324 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.336336 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.336349 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.336362 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.336373 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.335734 4682 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.222:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187fd4c4b810b54d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting 
kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-10 10:45:20.319305037 +0000 UTC m=+0.639515787,LastTimestamp:2025-12-10 10:45:20.319305037 +0000 UTC m=+0.639515787,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.336388 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.336546 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339092 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339157 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339190 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339224 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339245 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339267 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339288 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339310 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Dec 10 
10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339331 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339351 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339372 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339394 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339417 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339442 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339514 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339547 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339575 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339599 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339631 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Dec 10 
10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339654 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339675 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339696 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339717 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339737 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339767 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339789 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339812 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339834 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339856 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339912 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc 
kubenswrapper[4682]: I1210 10:45:20.339945 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.339972 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340002 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340030 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340059 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340088 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340117 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340144 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340171 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340203 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340232 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340261 4682 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340301 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340333 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340365 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340396 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340432 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340462 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340555 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340583 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340610 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340639 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.340670 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341445 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341584 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341618 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341646 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341675 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341707 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341737 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341765 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341794 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341825 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341852 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341880 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341909 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341933 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341959 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.341987 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.342014 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.342039 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.342069 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.342096 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.342124 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.342154 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.342182 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.342210 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.342238 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.342271 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.342300 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.342327 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.342392 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.342422 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.343459 4682 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.343571 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: 
I1210 10:45:20.343611 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.343641 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.343671 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.343698 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.343730 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.343758 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.343789 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.343818 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.343848 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.343878 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.343919 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.343964 4682 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.343998 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344030 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344075 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344110 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344142 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344181 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344217 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344257 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344288 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344321 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344365 4682 reconstruct.go:130] "Volume is marked as uncertain 
and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344397 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344428 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344462 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344553 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344583 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344610 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344638 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344666 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344691 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344720 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344751 4682 reconstruct.go:130] "Volume is marked as uncertain and added 
into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344782 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344811 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344848 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344877 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344907 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344936 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344965 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.344996 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345023 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345049 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345075 4682 reconstruct.go:130] "Volume is marked 
as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345101 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345133 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345160 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345188 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345217 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345245 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345344 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345374 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345405 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345434 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345515 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345549 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345585 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345613 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345640 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345667 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345692 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345720 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345747 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345774 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345801 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345831 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345861 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345888 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345921 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345949 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.345975 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346004 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346034 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346065 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346088 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346110 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346138 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" 
volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346168 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346208 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346239 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346270 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346302 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346331 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346398 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346432 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346465 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346623 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346656 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346692 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346721 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346754 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346784 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346855 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346901 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346930 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346964 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.346995 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347024 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347052 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347082 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347107 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347137 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347166 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347207 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347327 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347357 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347389 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347419 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347447 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347517 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347559 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347592 4682 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347619 4682 reconstruct.go:97] "Volume reconstruction finished" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.347639 4682 reconciler.go:26] "Reconciler: start to sync state" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.352929 4682 manager.go:324] Recovery completed Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.362811 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.367073 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.367120 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.367132 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.368294 4682 cpu_manager.go:225] "Starting CPU manager" policy="none" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.368316 4682 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.368338 4682 state_mem.go:36] "Initialized new in-memory state store" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.378215 4682 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.379682 4682 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.379725 4682 status_manager.go:217] "Starting to sync pod status with apiserver" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.379754 4682 kubelet.go:2335] "Starting kubelet main sync loop" Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.379799 4682 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Dec 10 10:45:20 crc kubenswrapper[4682]: W1210 10:45:20.380536 4682 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.380624 4682 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.424564 4682 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.480978 4682 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.487620 4682 policy_none.go:49] "None policy: Start" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.489292 4682 memory_manager.go:170] "Starting memorymanager" policy="None" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.489351 4682 state_mem.go:35] "Initializing new in-memory state store" Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.524963 4682 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.536135 4682 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="400ms" Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.625860 4682 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.633149 4682 manager.go:334] "Starting Device Plugin manager" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.633220 4682 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.633240 4682 server.go:79] "Starting device plugin registration server" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.633998 4682 eviction_manager.go:189] "Eviction manager: starting control loop" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.634032 4682 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.634230 4682 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Dec 10 10:45:20 crc 
kubenswrapper[4682]: I1210 10:45:20.634515 4682 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.634549 4682 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.646394 4682 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.681714 4682 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"] Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.681907 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.683615 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.683665 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.683677 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.683890 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.684536 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.684608 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.685241 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.685288 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.685307 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.685499 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.685652 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.685727 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.686364 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.686393 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.686409 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.686689 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.686742 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.686760 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.686976 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.687145 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.687225 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.687743 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.687783 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.687799 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.689027 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.689245 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.689427 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.689030 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.689739 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.689764 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.690192 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:20 crc 
kubenswrapper[4682]: I1210 10:45:20.690282 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.690489 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.691600 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.691787 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.691914 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.691936 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.692103 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.692127 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.692687 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.692752 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.693926 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.693964 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.693981 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.734595 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.736470 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.736564 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.736586 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.736628 4682 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.737259 4682 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.222:6443: connect: connection refused" node="crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.753482 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.753536 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.753571 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.753588 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.753760 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.753807 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.753917 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.753998 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.754036 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.754058 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.754085 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.754105 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.754146 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.754185 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.754215 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856182 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856240 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856261 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856279 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 
10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856294 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856310 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856325 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856338 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856352 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856367 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856355 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856419 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856410 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856461 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 
10:45:20.856381 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856515 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856485 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856549 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856555 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856574 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856564 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856584 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856613 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856617 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856596 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856637 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856658 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856664 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856697 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.856882 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.937583 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.938315 4682 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="800ms" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.940739 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.940786 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.940795 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:20 crc kubenswrapper[4682]: I1210 10:45:20.940830 4682 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 10:45:20 crc kubenswrapper[4682]: E1210 10:45:20.941423 4682 
kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.222:6443: connect: connection refused" node="crc" Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.009186 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.022633 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.044422 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:45:21 crc kubenswrapper[4682]: W1210 10:45:21.047799 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-503e0c7167112ae17767730163fff56b3314ab81c39c463a14539ad38b34c0cb WatchSource:0}: Error finding container 503e0c7167112ae17767730163fff56b3314ab81c39c463a14539ad38b34c0cb: Status 404 returned error can't find the container with id 503e0c7167112ae17767730163fff56b3314ab81c39c463a14539ad38b34c0cb Dec 10 10:45:21 crc kubenswrapper[4682]: W1210 10:45:21.055600 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-d332357ef3c336c5f83a4814861c14bdae24b3febb47f56dfd4558f02cbcf21a WatchSource:0}: Error finding container d332357ef3c336c5f83a4814861c14bdae24b3febb47f56dfd4558f02cbcf21a: Status 404 returned error can't find the container with id d332357ef3c336c5f83a4814861c14bdae24b3febb47f56dfd4558f02cbcf21a Dec 10 10:45:21 crc kubenswrapper[4682]: W1210 10:45:21.067452 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-05362a2a656e197fe022bff218fd63b95d4f0ad68bf6f167b39ac96c301b658f WatchSource:0}: Error finding container 05362a2a656e197fe022bff218fd63b95d4f0ad68bf6f167b39ac96c301b658f: Status 404 returned error can't find the container with id 05362a2a656e197fe022bff218fd63b95d4f0ad68bf6f167b39ac96c301b658f Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.070427 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.077889 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Dec 10 10:45:21 crc kubenswrapper[4682]: W1210 10:45:21.084543 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-0bbd941a892dfe95ef795cd85aabbfe74ef97be690d9868afd0c35b2fcf8e571 WatchSource:0}: Error finding container 0bbd941a892dfe95ef795cd85aabbfe74ef97be690d9868afd0c35b2fcf8e571: Status 404 returned error can't find the container with id 0bbd941a892dfe95ef795cd85aabbfe74ef97be690d9868afd0c35b2fcf8e571 Dec 10 10:45:21 crc kubenswrapper[4682]: W1210 10:45:21.105033 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-bff16547a67d2d39e669d9bf245935d9bbf88927be7c29e7c087f4309fa8d90b WatchSource:0}: Error finding container bff16547a67d2d39e669d9bf245935d9bbf88927be7c29e7c087f4309fa8d90b: Status 404 returned error can't find the container with id bff16547a67d2d39e669d9bf245935d9bbf88927be7c29e7c087f4309fa8d90b Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.324279 4682 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.341726 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.342836 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.342899 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.342908 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.342927 4682 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 10:45:21 crc kubenswrapper[4682]: E1210 10:45:21.343360 4682 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.222:6443: connect: connection refused" node="crc" Dec 10 10:45:21 crc kubenswrapper[4682]: W1210 10:45:21.378803 4682 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Dec 10 10:45:21 crc kubenswrapper[4682]: E1210 10:45:21.378878 4682 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.382790 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d332357ef3c336c5f83a4814861c14bdae24b3febb47f56dfd4558f02cbcf21a"} Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.383502 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"503e0c7167112ae17767730163fff56b3314ab81c39c463a14539ad38b34c0cb"} Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.384286 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"bff16547a67d2d39e669d9bf245935d9bbf88927be7c29e7c087f4309fa8d90b"} Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.384980 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"0bbd941a892dfe95ef795cd85aabbfe74ef97be690d9868afd0c35b2fcf8e571"} Dec 10 10:45:21 crc kubenswrapper[4682]: I1210 10:45:21.385734 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"05362a2a656e197fe022bff218fd63b95d4f0ad68bf6f167b39ac96c301b658f"} Dec 10 10:45:21 crc kubenswrapper[4682]: W1210 10:45:21.388318 4682 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Dec 10 10:45:21 crc kubenswrapper[4682]: E1210 10:45:21.388358 4682 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:45:21 crc kubenswrapper[4682]: E1210 10:45:21.740009 4682 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="1.6s" Dec 10 10:45:21 crc kubenswrapper[4682]: W1210 10:45:21.772163 4682 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Dec 10 10:45:21 crc kubenswrapper[4682]: E1210 10:45:21.772310 4682 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:45:22 crc kubenswrapper[4682]: W1210 10:45:22.053813 4682 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Dec 10 10:45:22 
crc kubenswrapper[4682]: E1210 10:45:22.053909 4682 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.144250 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.146068 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.146106 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.146118 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.146140 4682 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 10:45:22 crc kubenswrapper[4682]: E1210 10:45:22.146643 4682 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.222:6443: connect: connection refused" node="crc" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.323748 4682 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.392025 4682 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="0d4df22bd4bb69e72e795eddc4b87532730c9c2c8d3bfaa6b4f41f5c6fe9676e" exitCode=0 Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.392116 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"0d4df22bd4bb69e72e795eddc4b87532730c9c2c8d3bfaa6b4f41f5c6fe9676e"} Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.392218 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.393652 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.393689 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.393714 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.394791 4682 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294" exitCode=0 Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.394861 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294"} Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.394977 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.396597 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.396666 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.396686 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.400817 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18"} Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.400864 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9"} Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.400885 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b"} Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.403351 4682 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0" exitCode=0 Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.403444 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0"} Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.403518 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.404554 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.404587 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.404600 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.405539 4682 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145" exitCode=0 Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.405566 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145"} Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.405747 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.406947 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.406985 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.406996 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.409560 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.410707 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.410743 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:22 crc kubenswrapper[4682]: I1210 10:45:22.410758 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:23 crc kubenswrapper[4682]: W1210 10:45:23.119388 4682 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Dec 10 10:45:23 crc kubenswrapper[4682]: E1210 10:45:23.119462 4682 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.324005 4682 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Dec 10 10:45:23 crc kubenswrapper[4682]: E1210 10:45:23.341685 4682 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="3.2s" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.411092 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"008323d5ab9db5bef027d390691379aad0773741eec1a3d48a7a6a9d23d9fe0e"} Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.411135 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"857d51665f1636c85cef233d00747420b56fcda68ed66d9f4628304e7868242c"} Dec 10 10:45:23 crc 
kubenswrapper[4682]: I1210 10:45:23.411146 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"86054212a009f28d22b4dc4f9181fbea05c535d929160f8c05e8d649745c2bb6"} Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.411175 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.412296 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.412320 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.412328 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.413892 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e"} Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.413976 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.414617 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.414636 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.414645 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.418635 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457"} Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.418661 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1"} Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.418670 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7"} Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.420340 4682 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff" exitCode=0 Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.420391 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff"} Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 
10:45:23.420496 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.421336 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.421390 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.421405 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.422251 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"c445f7ada23dc8166a355739343a03d78c43a2ac04e2bab918d667ef9c206629"} Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.422281 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.422943 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.422979 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.422994 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:23 crc kubenswrapper[4682]: W1210 10:45:23.432336 4682 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Dec 10 10:45:23 crc kubenswrapper[4682]: E1210 10:45:23.432420 4682 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.747059 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.748399 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.748431 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.748441 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:23 crc kubenswrapper[4682]: I1210 10:45:23.748462 4682 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 10:45:23 crc kubenswrapper[4682]: E1210 10:45:23.748851 4682 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.222:6443: connect: connection refused" node="crc" Dec 10 10:45:23 crc kubenswrapper[4682]: W1210 10:45:23.897450 4682 
reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Dec 10 10:45:23 crc kubenswrapper[4682]: E1210 10:45:23.897542 4682 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.214592 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.371187 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.428414 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe"} Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.428496 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4"} Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.428551 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.429588 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.429648 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.429689 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.430646 4682 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6" exitCode=0 Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.430711 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6"} Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.430764 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.430799 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.430847 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.430906 4682 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" 
Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.430976 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.431592 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.431634 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.431647 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.431996 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.432033 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.432051 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.432065 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.432087 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.432095 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.432071 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.432160 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:24 crc kubenswrapper[4682]: I1210 10:45:24.432176 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:25 crc kubenswrapper[4682]: I1210 10:45:25.437263 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd"} Dec 10 10:45:25 crc kubenswrapper[4682]: I1210 10:45:25.437316 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05"} Dec 10 10:45:25 crc kubenswrapper[4682]: I1210 10:45:25.437331 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:25 crc kubenswrapper[4682]: I1210 10:45:25.437335 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c"} Dec 10 10:45:25 crc kubenswrapper[4682]: I1210 10:45:25.437420 4682 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 10:45:25 crc kubenswrapper[4682]: I1210 10:45:25.437504 4682 kubelet_node_status.go:401] "Setting node annotation to 
enable volume controller attach/detach" Dec 10 10:45:25 crc kubenswrapper[4682]: I1210 10:45:25.438138 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:25 crc kubenswrapper[4682]: I1210 10:45:25.438166 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:25 crc kubenswrapper[4682]: I1210 10:45:25.438176 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:25 crc kubenswrapper[4682]: I1210 10:45:25.438835 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:25 crc kubenswrapper[4682]: I1210 10:45:25.438853 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:25 crc kubenswrapper[4682]: I1210 10:45:25.438864 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.181295 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.385912 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.392923 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.444745 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5"} Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.444798 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.444798 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672"} Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.444908 4682 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.445327 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.445392 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.446347 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.446395 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.446411 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.448693 4682 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.448735 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.448809 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.448845 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.448897 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.448909 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.949750 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.951047 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.951079 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.951088 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:26 crc kubenswrapper[4682]: I1210 10:45:26.951109 4682 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 10:45:27 crc kubenswrapper[4682]: I1210 10:45:27.447024 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:27 crc kubenswrapper[4682]: I1210 10:45:27.447086 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:27 crc kubenswrapper[4682]: I1210 10:45:27.448075 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:27 crc kubenswrapper[4682]: I1210 10:45:27.448119 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:27 crc kubenswrapper[4682]: I1210 10:45:27.448139 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:27 crc kubenswrapper[4682]: I1210 10:45:27.448162 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:27 crc kubenswrapper[4682]: I1210 10:45:27.448187 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:27 crc kubenswrapper[4682]: I1210 10:45:27.448201 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:28 crc kubenswrapper[4682]: I1210 10:45:28.010065 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:45:28 crc kubenswrapper[4682]: I1210 10:45:28.010247 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:28 crc kubenswrapper[4682]: I1210 10:45:28.011898 4682 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:28 crc kubenswrapper[4682]: I1210 10:45:28.012115 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:28 crc kubenswrapper[4682]: I1210 10:45:28.012220 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:28 crc kubenswrapper[4682]: I1210 10:45:28.991578 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:28 crc kubenswrapper[4682]: I1210 10:45:28.991778 4682 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 10:45:28 crc kubenswrapper[4682]: I1210 10:45:28.991828 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:28 crc kubenswrapper[4682]: I1210 10:45:28.993791 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:28 crc kubenswrapper[4682]: I1210 10:45:28.993841 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:28 crc kubenswrapper[4682]: I1210 10:45:28.993858 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:30 crc kubenswrapper[4682]: I1210 10:45:30.507669 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:30 crc kubenswrapper[4682]: I1210 10:45:30.507914 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:30 crc kubenswrapper[4682]: I1210 10:45:30.509403 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:30 crc kubenswrapper[4682]: I1210 10:45:30.509461 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:30 crc kubenswrapper[4682]: I1210 10:45:30.509523 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:30 crc kubenswrapper[4682]: E1210 10:45:30.647518 4682 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Dec 10 10:45:30 crc kubenswrapper[4682]: I1210 10:45:30.697049 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:30 crc kubenswrapper[4682]: I1210 10:45:30.697258 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:30 crc kubenswrapper[4682]: I1210 10:45:30.699012 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:30 crc kubenswrapper[4682]: I1210 10:45:30.699052 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:30 crc kubenswrapper[4682]: I1210 10:45:30.699064 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:30 crc kubenswrapper[4682]: I1210 10:45:30.751728 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Dec 10 10:45:30 crc 
kubenswrapper[4682]: I1210 10:45:30.752007 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:30 crc kubenswrapper[4682]: I1210 10:45:30.754078 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:30 crc kubenswrapper[4682]: I1210 10:45:30.754166 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:30 crc kubenswrapper[4682]: I1210 10:45:30.754184 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:33 crc kubenswrapper[4682]: I1210 10:45:33.537078 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Dec 10 10:45:33 crc kubenswrapper[4682]: I1210 10:45:33.537247 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:33 crc kubenswrapper[4682]: I1210 10:45:33.538234 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:33 crc kubenswrapper[4682]: I1210 10:45:33.538300 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:33 crc kubenswrapper[4682]: I1210 10:45:33.538311 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:33 crc kubenswrapper[4682]: I1210 10:45:33.697889 4682 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 10:45:33 crc kubenswrapper[4682]: I1210 10:45:33.697968 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 10 10:45:34 crc kubenswrapper[4682]: I1210 10:45:34.148877 4682 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Dec 10 10:45:34 crc kubenswrapper[4682]: I1210 10:45:34.148951 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Dec 10 10:45:34 crc kubenswrapper[4682]: I1210 10:45:34.155173 4682 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot 
get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Dec 10 10:45:34 crc kubenswrapper[4682]: I1210 10:45:34.155394 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Dec 10 10:45:34 crc kubenswrapper[4682]: I1210 10:45:34.375334 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:34 crc kubenswrapper[4682]: I1210 10:45:34.375694 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:34 crc kubenswrapper[4682]: I1210 10:45:34.376919 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:34 crc kubenswrapper[4682]: I1210 10:45:34.376952 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:34 crc kubenswrapper[4682]: I1210 10:45:34.376962 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:36 crc kubenswrapper[4682]: I1210 10:45:36.186261 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:36 crc kubenswrapper[4682]: I1210 10:45:36.186400 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:36 crc kubenswrapper[4682]: I1210 10:45:36.187321 4682 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Dec 10 10:45:36 crc kubenswrapper[4682]: I1210 10:45:36.187389 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Dec 10 10:45:36 crc kubenswrapper[4682]: I1210 10:45:36.187687 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:36 crc kubenswrapper[4682]: I1210 10:45:36.187734 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:36 crc kubenswrapper[4682]: I1210 10:45:36.187745 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:36 crc kubenswrapper[4682]: I1210 10:45:36.190628 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:36 crc kubenswrapper[4682]: I1210 10:45:36.471307 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:36 crc kubenswrapper[4682]: I1210 10:45:36.471797 4682 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: 
connect: connection refused" start-of-body= Dec 10 10:45:36 crc kubenswrapper[4682]: I1210 10:45:36.471855 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Dec 10 10:45:36 crc kubenswrapper[4682]: I1210 10:45:36.472639 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:36 crc kubenswrapper[4682]: I1210 10:45:36.472669 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:36 crc kubenswrapper[4682]: I1210 10:45:36.472682 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.134893 4682 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.136221 4682 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.136397 4682 trace.go:236] Trace[1868651647]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (10-Dec-2025 10:45:24.753) (total time: 14382ms): Dec 10 10:45:39 crc kubenswrapper[4682]: Trace[1868651647]: ---"Objects listed" error: 14382ms (10:45:39.136) Dec 10 10:45:39 crc kubenswrapper[4682]: Trace[1868651647]: [14.382404092s] [14.382404092s] END Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.136421 4682 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.136542 4682 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.138195 4682 trace.go:236] Trace[1164066961]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (10-Dec-2025 10:45:27.800) (total time: 11337ms): Dec 10 10:45:39 crc kubenswrapper[4682]: Trace[1164066961]: ---"Objects listed" error: 11337ms (10:45:39.138) Dec 10 10:45:39 crc kubenswrapper[4682]: Trace[1164066961]: [11.337482306s] [11.337482306s] END Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.138234 4682 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.139231 4682 trace.go:236] Trace[2001078179]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (10-Dec-2025 10:45:28.625) (total time: 10513ms): Dec 10 10:45:39 crc kubenswrapper[4682]: Trace[2001078179]: ---"Objects listed" error: 10513ms (10:45:39.138) Dec 10 10:45:39 crc kubenswrapper[4682]: Trace[2001078179]: [10.513585685s] [10.513585685s] END Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.139265 4682 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.140351 4682 trace.go:236] Trace[1431126179]: "Reflector 
ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (10-Dec-2025 10:45:27.521) (total time: 11618ms): Dec 10 10:45:39 crc kubenswrapper[4682]: Trace[1431126179]: ---"Objects listed" error: 11618ms (10:45:39.140) Dec 10 10:45:39 crc kubenswrapper[4682]: Trace[1431126179]: [11.618399176s] [11.618399176s] END Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.140392 4682 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.322106 4682 apiserver.go:52] "Watching apiserver" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.324288 4682 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.324483 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"] Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.324771 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.324818 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.324830 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.324777 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.324891 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.325290 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.325327 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.325336 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.325576 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.330691 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.330701 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.330795 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.330843 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.331098 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.331168 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.331189 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.331369 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.331443 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.358386 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.369170 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.387321 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.399216 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.416067 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.424362 4682 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.431653 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438193 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438263 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438291 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438317 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438352 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438375 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438394 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 10 10:45:39 crc 
kubenswrapper[4682]: I1210 10:45:39.438411 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438432 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438459 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438494 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438509 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438527 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438546 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438564 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438582 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438607 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: 
\"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438631 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438648 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438665 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438688 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438708 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438727 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438746 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438768 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438760 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438787 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438770 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438806 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438826 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438845 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438839 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438864 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438884 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438905 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438925 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438946 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438964 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438981 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.438997 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439003 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439021 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439020 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439084 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439115 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439141 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439165 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439188 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439214 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439237 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439266 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: 
\"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439296 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439320 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439346 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439371 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439396 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439421 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439447 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439490 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439514 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439542 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439574 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439607 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439638 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439661 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439683 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439706 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439733 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439773 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439797 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439821 4682 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439843 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439031 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439122 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439203 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439226 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439253 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439260 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439287 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439334 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439381 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439418 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439501 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439582 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439589 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439765 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439867 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.439955 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440124 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440151 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440154 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440185 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440199 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440354 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440465 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440504 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440520 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440536 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440561 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440604 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440619 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440721 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440774 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440807 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440886 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.440934 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441005 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441038 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441063 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441088 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441111 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441134 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441158 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441184 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441209 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441233 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: 
\"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441260 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441296 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441322 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441347 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441374 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441402 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441429 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441451 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441496 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441519 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod 
\"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441543 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441576 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441604 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441628 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441651 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441672 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441694 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441718 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441741 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441764 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod 
\"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441786 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441810 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441837 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441859 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441880 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441903 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442033 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442065 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442092 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442114 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442172 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442198 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442219 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442244 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442265 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442286 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442310 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442332 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442354 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442376 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442400 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442424 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442447 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441008 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441086 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441117 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442711 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442725 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442761 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442784 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442807 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442830 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442855 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442883 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442908 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442932 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442955 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442978 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443004 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443029 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443054 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443079 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443106 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443130 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443153 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443176 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443218 4682 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443242 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443267 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443294 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443322 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443348 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443378 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443410 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443438 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443630 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443665 4682 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443695 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443959 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443994 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444027 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444057 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444094 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444124 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444159 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444198 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444230 4682 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444260 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444289 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444324 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444355 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444389 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444421 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444488 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444520 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444551 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444582 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444612 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444645 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444678 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444708 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444741 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444789 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444832 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444905 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444989 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 
10:45:39.445026 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445073 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445105 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445141 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445173 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445206 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445237 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445267 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445301 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445334 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: 
\"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445702 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445827 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445865 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445901 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445935 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445969 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446043 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446082 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446126 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:39 crc 
kubenswrapper[4682]: I1210 10:45:39.446193 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446226 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446260 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446299 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446343 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446379 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446412 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446685 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446723 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: 
\"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446753 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446788 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446896 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446940 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446963 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446982 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.446999 4682 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447019 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447038 4682 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447058 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447075 4682 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447093 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447111 4682 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447128 4682 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447147 4682 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447167 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447185 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447204 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447222 4682 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447240 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447257 4682 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447274 4682 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447292 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447310 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: 
\"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447327 4682 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447345 4682 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447364 4682 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447382 4682 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447401 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447418 4682 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447453 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447492 4682 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447512 4682 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447529 4682 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447546 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447562 4682 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447580 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448381 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448405 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448424 4682 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448444 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448463 4682 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448502 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448524 4682 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448546 4682 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441128 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441185 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441206 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441235 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441301 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441322 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441341 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441360 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.441374 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.442886 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). 
InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443440 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.443559 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444004 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444302 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444342 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444609 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.444824 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445027 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445504 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.445712 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447463 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447852 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.447892 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448015 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448064 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448387 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). 
InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.449362 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:39.949336838 +0000 UTC m=+20.269547598 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.449405 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.449493 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.449582 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.449699 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.449849 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.449871 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.449970 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.450043 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.450085 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.450061 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448561 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.450218 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448717 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). 
InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448944 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.449098 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.449147 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.450356 4682 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.450676 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.450725 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.450817 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.450859 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.450958 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.451009 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.451447 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.451846 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:39.951785325 +0000 UTC m=+20.271996085 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.451953 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.452084 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.452161 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.452465 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.452610 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.452865 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.453198 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.453294 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.453427 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.454025 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.454304 4682 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.454464 4682 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.458084 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:39.9580658 +0000 UTC m=+20.278276550 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.458618 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.458707 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.458725 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.458861 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.458880 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.459022 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.459301 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.459348 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.459578 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.459714 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.459881 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.460090 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.460242 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.460582 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.460620 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.461006 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.461392 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.461751 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.463066 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.465324 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.465672 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.465734 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.465828 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.466097 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.466108 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.448527 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.466245 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.466559 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.467155 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.467194 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.467491 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.467521 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.467831 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.467906 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.468009 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.468121 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.468172 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.468125 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.468288 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.468731 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.468742 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.469220 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.473673 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.473701 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.473759 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.474364 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.474447 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.474555 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.474769 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.474905 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.474923 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.474938 4682 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.474998 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:39.974972966 +0000 UTC m=+20.295183716 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.475361 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.475670 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.475646 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.475936 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.475975 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.476191 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.476640 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.476639 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.476749 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.475380 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.476784 4682 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.476824 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:39.976810774 +0000 UTC m=+20.297021524 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.477846 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.477991 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.478034 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.478401 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.478818 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.478934 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.479181 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.479370 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.479543 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.479579 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.479977 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.481019 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.483317 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.486540 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.487021 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.487804 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.487840 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.487829 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.488064 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.488116 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.488329 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.488376 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.488551 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). 
InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.488529 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.488826 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.488894 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.489204 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.489235 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.489516 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.489654 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.489706 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.489849 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.490252 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.490619 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.490830 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.490947 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.490998 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.491176 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.491275 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.491309 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.497844 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.511348 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.516203 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.522078 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549624 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549674 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549730 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549745 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549755 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549765 4682 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549773 4682 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549783 4682 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549793 4682 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549802 4682 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549811 4682 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549821 4682 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549829 4682 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549837 4682 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549845 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549853 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549860 4682 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549868 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549876 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549885 4682 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549895 4682 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549902 4682 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549910 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549919 4682 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549927 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549936 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549944 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549951 4682 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549959 4682 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549967 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549975 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549982 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549990 4682 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.549999 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550006 4682 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550014 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550022 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550030 4682 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") 
on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550038 4682 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550045 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550053 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550061 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550070 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550083 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550090 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550099 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550107 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550115 4682 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550124 4682 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550131 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550139 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc 
kubenswrapper[4682]: I1210 10:45:39.550149 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550157 4682 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550165 4682 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550175 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550183 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550192 4682 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550200 4682 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550208 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550216 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550267 4682 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550276 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550285 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550293 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550301 4682 reconciler_common.go:293] 
"Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550309 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550318 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550327 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550338 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550350 4682 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550362 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550370 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550378 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550387 4682 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550395 4682 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550405 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550413 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" 
DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550421 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550430 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550438 4682 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550446 4682 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550454 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550462 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550491 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550509 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550518 4682 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550527 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550534 4682 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550543 4682 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550551 4682 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: 
\"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550559 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550566 4682 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550576 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550584 4682 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550593 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550601 4682 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550608 4682 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550616 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550623 4682 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550631 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550638 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550647 4682 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550659 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: 
\"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550667 4682 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550674 4682 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550682 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550690 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550700 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550707 4682 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550715 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550723 4682 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550730 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550738 4682 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550745 4682 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550753 4682 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550760 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" 
DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550769 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550777 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550785 4682 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550792 4682 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550800 4682 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550809 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550820 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550830 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550841 4682 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550852 4682 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550862 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550870 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550879 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" 
DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550887 4682 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550895 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550903 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550913 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550925 4682 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550935 4682 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550945 4682 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550957 4682 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550964 4682 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550972 4682 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550982 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550992 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.550999 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.551008 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.551016 4682 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.551024 4682 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.551032 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.551041 4682 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.551049 4682 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.551058 4682 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.551067 4682 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.551074 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.551082 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.551089 4682 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.551270 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 
10:45:39.551308 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.640671 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.648275 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.656185 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 10:45:39 crc kubenswrapper[4682]: W1210 10:45:39.668277 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-753401485cfa346aa69714dfaeb955f1c31a770a8e7d251274b3560b7e4340ec WatchSource:0}: Error finding container 753401485cfa346aa69714dfaeb955f1c31a770a8e7d251274b3560b7e4340ec: Status 404 returned error can't find the container with id 753401485cfa346aa69714dfaeb955f1c31a770a8e7d251274b3560b7e4340ec Dec 10 10:45:39 crc kubenswrapper[4682]: W1210 10:45:39.673290 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-266571af58a5d7bc25e4b0f1196fcb4df594d3a87942c70b247fcc8b3e7df764 WatchSource:0}: Error finding container 266571af58a5d7bc25e4b0f1196fcb4df594d3a87942c70b247fcc8b3e7df764: Status 404 returned error can't find the container with id 266571af58a5d7bc25e4b0f1196fcb4df594d3a87942c70b247fcc8b3e7df764 Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.955902 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:39 crc kubenswrapper[4682]: I1210 10:45:39.956041 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.956187 4682 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.956189 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:40.956145676 +0000 UTC m=+21.276356476 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:45:39 crc kubenswrapper[4682]: E1210 10:45:39.956330 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:40.956305202 +0000 UTC m=+21.276516002 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.057521 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.057567 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.057590 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:40 crc kubenswrapper[4682]: E1210 10:45:40.057700 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:40 crc kubenswrapper[4682]: E1210 10:45:40.057715 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:40 crc kubenswrapper[4682]: E1210 10:45:40.057725 4682 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:40 crc kubenswrapper[4682]: E1210 10:45:40.057738 4682 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not 
registered Dec 10 10:45:40 crc kubenswrapper[4682]: E1210 10:45:40.057773 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:41.057759158 +0000 UTC m=+21.377969908 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:40 crc kubenswrapper[4682]: E1210 10:45:40.057769 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:40 crc kubenswrapper[4682]: E1210 10:45:40.057803 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:41.057786299 +0000 UTC m=+21.377997069 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:40 crc kubenswrapper[4682]: E1210 10:45:40.057813 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:40 crc kubenswrapper[4682]: E1210 10:45:40.057829 4682 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:40 crc kubenswrapper[4682]: E1210 10:45:40.057898 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:41.057877752 +0000 UTC m=+21.378088512 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.380072 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:40 crc kubenswrapper[4682]: E1210 10:45:40.380275 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.385271 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.386128 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.388072 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.389206 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.389957 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.390549 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.390636 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.391136 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.391678 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.392311 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.392871 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.393353 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.394055 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.394531 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.395037 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.395531 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.396021 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 
10:45:40.396565 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.396922 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.397431 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.398000 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.401280 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.401851 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.402248 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.403134 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.403252 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.403643 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.404650 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.405233 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.406054 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.406789 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.407590 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.408020 4682 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.408113 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.410014 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Dec 10 10:45:40 crc 
kubenswrapper[4682]: I1210 10:45:40.410568 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.410931 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.412398 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.413326 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.413932 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.414955 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.415596 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.416380 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.416944 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.417919 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.417911 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.418641 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.419689 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.420341 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.421411 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.422323 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.423524 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.424082 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.425160 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.425872 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" 
path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.426681 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.427688 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.435800 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.453208 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.463193 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.483756 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"753401485cfa346aa69714dfaeb955f1c31a770a8e7d251274b3560b7e4340ec"} Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.484721 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"3890e28dfcff3af3030d75a667a4ee6ce74bfbb11b3820f1238a1492261c02a4"} Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.486172 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"266571af58a5d7bc25e4b0f1196fcb4df594d3a87942c70b247fcc8b3e7df764"} Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.702175 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.706235 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.712632 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.859700 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.862989 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.885651 4682 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:56426->192.168.126.11:17697: read: connection reset by peer" start-of-body= Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.885715 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:56426->192.168.126.11:17697: read: connection reset by peer" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.886290 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.921030 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.950457 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.973080 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:40 crc kubenswrapper[4682]: I1210 10:45:40.993214 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.002394 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.010646 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.019697 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.021639 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.021702 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.021781 4682 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.021807 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:43.021782533 +0000 UTC m=+23.341993283 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.021840 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:43.021831474 +0000 UTC m=+23.342042224 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.032368 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\
\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.042572 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.066106 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.122539 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.122593 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.122637 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.122724 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.122743 4682 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.122757 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.122783 4682 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.122816 4682 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:43.122797486 +0000 UTC m=+23.443008246 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.122823 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.122857 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.122868 4682 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.122835 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:43.122826696 +0000 UTC m=+23.443037466 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.122915 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:43.122900809 +0000 UTC m=+23.443111559 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.310840 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-xkwtt"] Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.311376 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-xkwtt" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.313894 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.314861 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.316808 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.324180 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d9072c21-61ad-489f-8603-5f5699ad5d31-hosts-file\") pod \"node-resolver-xkwtt\" (UID: \"d9072c21-61ad-489f-8603-5f5699ad5d31\") " pod="openshift-dns/node-resolver-xkwtt" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.324231 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnw8q\" (UniqueName: \"kubernetes.io/projected/d9072c21-61ad-489f-8603-5f5699ad5d31-kube-api-access-jnw8q\") pod \"node-resolver-xkwtt\" (UID: \"d9072c21-61ad-489f-8603-5f5699ad5d31\") " pod="openshift-dns/node-resolver-xkwtt" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.326380 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\
\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.338142 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.350969 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.361859 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.372069 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.380315 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.380489 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.380326 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:41 crc kubenswrapper[4682]: E1210 10:45:41.380586 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.384558 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.399213 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.414738 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.425041 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d9072c21-61ad-489f-8603-5f5699ad5d31-hosts-file\") pod \"node-resolver-xkwtt\" (UID: \"d9072c21-61ad-489f-8603-5f5699ad5d31\") " pod="openshift-dns/node-resolver-xkwtt" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.425096 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnw8q\" (UniqueName: \"kubernetes.io/projected/d9072c21-61ad-489f-8603-5f5699ad5d31-kube-api-access-jnw8q\") pod \"node-resolver-xkwtt\" (UID: \"d9072c21-61ad-489f-8603-5f5699ad5d31\") " pod="openshift-dns/node-resolver-xkwtt" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.425184 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d9072c21-61ad-489f-8603-5f5699ad5d31-hosts-file\") pod \"node-resolver-xkwtt\" (UID: \"d9072c21-61ad-489f-8603-5f5699ad5d31\") " pod="openshift-dns/node-resolver-xkwtt" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.443960 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnw8q\" (UniqueName: \"kubernetes.io/projected/d9072c21-61ad-489f-8603-5f5699ad5d31-kube-api-access-jnw8q\") pod \"node-resolver-xkwtt\" (UID: \"d9072c21-61ad-489f-8603-5f5699ad5d31\") " pod="openshift-dns/node-resolver-xkwtt" Dec 10 10:45:41 crc 
kubenswrapper[4682]: I1210 10:45:41.489886 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.491728 4682 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe" exitCode=255 Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.491788 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe"} Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.493190 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d"} Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.496139 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa"} Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.501901 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.509752 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.518873 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc 
kubenswrapper[4682]: I1210 10:45:41.532393 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\
":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.544504 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.544646 4682 scope.go:117] "RemoveContainer" containerID="5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.545291 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.558132 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.568721 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.579157 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.588413 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.597513 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.608617 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.622401 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.622540 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-xkwtt" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.626004 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-zs6ss"] Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.626278 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-58skk"] Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.626442 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.626526 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.629022 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.629851 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.629977 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.630103 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.630205 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.630551 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.630629 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.630731 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.630812 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.631880 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.634608 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-che
ck-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.636346 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-v27lh"] Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.637093 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.646971 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.647174 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 10 10:45:41 crc kubenswrapper[4682]: W1210 10:45:41.650703 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd9072c21_61ad_489f_8603_5f5699ad5d31.slice/crio-2b67c05332127abc2f44c9d49dfbb6a37f59ccb986d705acb3891fde26c8c895 WatchSource:0}: Error finding container 2b67c05332127abc2f44c9d49dfbb6a37f59ccb986d705acb3891fde26c8c895: Status 404 returned error can't find the container with id 2b67c05332127abc2f44c9d49dfbb6a37f59ccb986d705acb3891fde26c8c895 Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.655396 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.667131 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.677185 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.689461 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc 
kubenswrapper[4682]: I1210 10:45:41.703366 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\
":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.712642 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.722355 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.726823 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-os-release\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.726861 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-tuning-conf-dir\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.726876 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-cnibin\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.726922 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-cni-binary-copy\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.726967 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-cnibin\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.726999 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-system-cni-dir\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.727118 4682 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.727163 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmz79\" (UniqueName: \"kubernetes.io/projected/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-kube-api-access-zmz79\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.727215 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-multus-cni-dir\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.727231 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-os-release\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.727264 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-system-cni-dir\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.732904 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.742937 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.754822 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.768162 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\
\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54
319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.779986 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10
T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.791513 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.799252 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.809457 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.820322 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.828600 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-tuning-conf-dir\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.828654 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a005c959-3805-4e15-aa3a-7093815e03b8-cni-binary-copy\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.828682 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-cnibin\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.828705 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-var-lib-cni-bin\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.828746 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-cnibin\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.828793 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/b504d5b4-49dc-499d-b17c-957131ba411e-rootfs\") pod \"machine-config-daemon-58skk\" (UID: \"b504d5b4-49dc-499d-b17c-957131ba411e\") " pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.828859 4682 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b504d5b4-49dc-499d-b17c-957131ba411e-mcd-auth-proxy-config\") pod \"machine-config-daemon-58skk\" (UID: \"b504d5b4-49dc-499d-b17c-957131ba411e\") " pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.828901 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-multus-cni-dir\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.828920 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-system-cni-dir\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.828941 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-run-netns\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.828961 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-multus-conf-dir\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829059 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-system-cni-dir\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829094 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-cni-binary-copy\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829160 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-cnibin\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829191 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/a005c959-3805-4e15-aa3a-7093815e03b8-multus-daemon-config\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829220 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" 
(UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-multus-socket-dir-parent\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829214 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-multus-cni-dir\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829247 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b504d5b4-49dc-499d-b17c-957131ba411e-proxy-tls\") pod \"machine-config-daemon-58skk\" (UID: \"b504d5b4-49dc-499d-b17c-957131ba411e\") " pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829294 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-cnibin\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829308 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-system-cni-dir\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829344 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wd7rh\" (UniqueName: \"kubernetes.io/projected/a005c959-3805-4e15-aa3a-7093815e03b8-kube-api-access-wd7rh\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829361 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-system-cni-dir\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829383 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829405 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmz79\" (UniqueName: \"kubernetes.io/projected/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-kube-api-access-zmz79\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829420 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-var-lib-cni-multus\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829435 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvhsf\" (UniqueName: \"kubernetes.io/projected/b504d5b4-49dc-499d-b17c-957131ba411e-kube-api-access-rvhsf\") pod \"machine-config-daemon-58skk\" (UID: \"b504d5b4-49dc-499d-b17c-957131ba411e\") " pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829451 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-var-lib-kubelet\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829502 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-os-release\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829523 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-run-multus-certs\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829538 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-etc-kubernetes\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829570 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-os-release\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829591 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-run-k8s-cni-cncf-io\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829623 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-hostroot\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829821 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-os-release\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.829893 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-os-release\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.885294 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-cni-binary-copy\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.885397 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.889542 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmz79\" (UniqueName: \"kubernetes.io/projected/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-kube-api-access-zmz79\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.899061 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930640 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-run-netns\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930675 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-multus-conf-dir\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930697 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/a005c959-3805-4e15-aa3a-7093815e03b8-multus-daemon-config\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930713 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-multus-socket-dir-parent\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930729 
4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b504d5b4-49dc-499d-b17c-957131ba411e-proxy-tls\") pod \"machine-config-daemon-58skk\" (UID: \"b504d5b4-49dc-499d-b17c-957131ba411e\") " pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930753 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wd7rh\" (UniqueName: \"kubernetes.io/projected/a005c959-3805-4e15-aa3a-7093815e03b8-kube-api-access-wd7rh\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930752 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-run-netns\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930772 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-var-lib-cni-multus\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930815 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-var-lib-cni-multus\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930854 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvhsf\" (UniqueName: \"kubernetes.io/projected/b504d5b4-49dc-499d-b17c-957131ba411e-kube-api-access-rvhsf\") pod \"machine-config-daemon-58skk\" (UID: \"b504d5b4-49dc-499d-b17c-957131ba411e\") " pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930862 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-multus-socket-dir-parent\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930888 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-var-lib-kubelet\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930800 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-multus-conf-dir\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930933 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: 
\"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-run-multus-certs\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930965 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-run-multus-certs\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.930981 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-etc-kubernetes\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.931007 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-var-lib-kubelet\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.931013 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-etc-kubernetes\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.931030 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-run-k8s-cni-cncf-io\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.931050 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-hostroot\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.931075 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a005c959-3805-4e15-aa3a-7093815e03b8-cni-binary-copy\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.931096 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-var-lib-cni-bin\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.931113 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/b504d5b4-49dc-499d-b17c-957131ba411e-rootfs\") pod \"machine-config-daemon-58skk\" (UID: \"b504d5b4-49dc-499d-b17c-957131ba411e\") " pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:45:41 crc 
kubenswrapper[4682]: I1210 10:45:41.931115 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-hostroot\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.931115 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-run-k8s-cni-cncf-io\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.931139 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b504d5b4-49dc-499d-b17c-957131ba411e-mcd-auth-proxy-config\") pod \"machine-config-daemon-58skk\" (UID: \"b504d5b4-49dc-499d-b17c-957131ba411e\") " pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.931157 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a005c959-3805-4e15-aa3a-7093815e03b8-host-var-lib-cni-bin\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.931206 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/b504d5b4-49dc-499d-b17c-957131ba411e-rootfs\") pod \"machine-config-daemon-58skk\" (UID: \"b504d5b4-49dc-499d-b17c-957131ba411e\") " pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.931491 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/a005c959-3805-4e15-aa3a-7093815e03b8-multus-daemon-config\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.931766 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a005c959-3805-4e15-aa3a-7093815e03b8-cni-binary-copy\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.932035 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b504d5b4-49dc-499d-b17c-957131ba411e-mcd-auth-proxy-config\") pod \"machine-config-daemon-58skk\" (UID: \"b504d5b4-49dc-499d-b17c-957131ba411e\") " pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.933956 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b504d5b4-49dc-499d-b17c-957131ba411e-proxy-tls\") pod \"machine-config-daemon-58skk\" (UID: \"b504d5b4-49dc-499d-b17c-957131ba411e\") " pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.947305 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-wd7rh\" (UniqueName: \"kubernetes.io/projected/a005c959-3805-4e15-aa3a-7093815e03b8-kube-api-access-wd7rh\") pod \"multus-zs6ss\" (UID: \"a005c959-3805-4e15-aa3a-7093815e03b8\") " pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.947993 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvhsf\" (UniqueName: \"kubernetes.io/projected/b504d5b4-49dc-499d-b17c-957131ba411e-kube-api-access-rvhsf\") pod \"machine-config-daemon-58skk\" (UID: \"b504d5b4-49dc-499d-b17c-957131ba411e\") " pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.961745 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-zs6ss" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.981803 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.995404 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-vmhkf"] Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.996672 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.998831 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.999618 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.999854 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.999909 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 10 10:45:41 crc kubenswrapper[4682]: I1210 10:45:41.999912 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:41.999858 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.000214 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.009915 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.031809 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-kubelet\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.031840 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-log-socket\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.031857 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-ovn\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.031871 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-run-ovn-kubernetes\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.031889 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-slash\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.031903 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-systemd-units\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.031918 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovnkube-config\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.031930 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-env-overrides\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.031951 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.031968 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-var-lib-openvswitch\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.031982 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk8dd\" (UniqueName: \"kubernetes.io/projected/0d4402e6-a6f6-4970-8392-9f1856b52eb4-kube-api-access-hk8dd\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.032000 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-run-netns\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.032016 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-etc-openvswitch\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.032030 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovn-node-metrics-cert\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.032051 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-node-log\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.032064 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-cni-bin\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.032077 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-openvswitch\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.032091 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-cni-netd\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.032106 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovnkube-script-lib\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.032124 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-systemd\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.120970 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/19919360-1e01-4b1c-a2fe-d7b0f7b582c4-tuning-conf-dir\") pod \"multus-additional-cni-plugins-v27lh\" (UID: \"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\") " pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133296 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-kubelet\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133339 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"log-socket\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-log-socket\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133366 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-ovn\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133398 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-run-ovn-kubernetes\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133444 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-slash\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133497 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-systemd-units\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133526 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovnkube-config\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133549 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-env-overrides\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133583 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133605 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-var-lib-openvswitch\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133626 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk8dd\" (UniqueName: 
\"kubernetes.io/projected/0d4402e6-a6f6-4970-8392-9f1856b52eb4-kube-api-access-hk8dd\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133651 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-run-netns\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133670 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-etc-openvswitch\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133689 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovn-node-metrics-cert\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133720 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-node-log\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133744 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-cni-bin\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133765 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-openvswitch\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133785 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-cni-netd\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133806 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovnkube-script-lib\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133835 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-systemd\") pod 
\"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133903 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-systemd\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133942 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-kubelet\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.133973 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-log-socket\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.134002 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-ovn\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.134035 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-run-ovn-kubernetes\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.134065 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-slash\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.134094 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-systemd-units\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.134782 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-etc-openvswitch\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.134784 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-cni-bin\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.134914 4682 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovnkube-config\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.134974 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-openvswitch\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.135008 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-cni-netd\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.135426 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-node-log\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.135541 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-var-lib-openvswitch\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.135595 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.135969 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-run-netns\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.136068 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-env-overrides\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.137262 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.138384 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovnkube-script-lib\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.140777 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovn-node-metrics-cert\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.159572 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.177283 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk8dd\" (UniqueName: \"kubernetes.io/projected/0d4402e6-a6f6-4970-8392-9f1856b52eb4-kube-api-access-hk8dd\") pod \"ovnkube-node-vmhkf\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.198796 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.211983 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.230130 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.239916 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.247959 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.256625 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.267636 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin 
routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.278932 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"
restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.290622 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.291388 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-v27lh" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.303624 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.309067 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:42 crc kubenswrapper[4682]: W1210 10:45:42.322158 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4402e6_a6f6_4970_8392_9f1856b52eb4.slice/crio-77f383ee433230c9780e7ed96b643708b6843c7a007b8329f8cb05d0f0af997f WatchSource:0}: Error finding container 77f383ee433230c9780e7ed96b643708b6843c7a007b8329f8cb05d0f0af997f: Status 404 returned error can't find the container with id 77f383ee433230c9780e7ed96b643708b6843c7a007b8329f8cb05d0f0af997f Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.380748 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:42 crc kubenswrapper[4682]: E1210 10:45:42.380888 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.502410 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13"} Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.503196 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" event={"ID":"19919360-1e01-4b1c-a2fe-d7b0f7b582c4","Type":"ContainerStarted","Data":"0b1397b4f27dde9bb9280f3c5a7e41bf898c5dfdc082ef767c863e295fe51651"} Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.512754 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-xkwtt" event={"ID":"d9072c21-61ad-489f-8603-5f5699ad5d31","Type":"ContainerStarted","Data":"2b67c05332127abc2f44c9d49dfbb6a37f59ccb986d705acb3891fde26c8c895"} Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.515662 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.516844 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372"} Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.518323 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerStarted","Data":"77f383ee433230c9780e7ed96b643708b6843c7a007b8329f8cb05d0f0af997f"} Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.519453 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"d411d3aa90ac11a904f9ba963f31ce306151c86a93d0becc29968f5a21b2fe1b"} Dec 10 10:45:42 crc kubenswrapper[4682]: I1210 10:45:42.520592 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zs6ss" event={"ID":"a005c959-3805-4e15-aa3a-7093815e03b8","Type":"ContainerStarted","Data":"87faf91818366713f23db2b331c49f8b5bf2a7ec3f91d2b474826083653dd038"} Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.042446 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.042608 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:47.042585146 +0000 UTC m=+27.362795906 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.042781 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.042908 4682 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.042961 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:47.042948158 +0000 UTC m=+27.363158928 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.144190 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.144240 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.144262 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.144425 4682 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.144427 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.144642 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.144661 4682 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.144443 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.144713 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.144721 4682 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.144619 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:47.14458576 +0000 UTC m=+27.464796510 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.144764 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:47.144748845 +0000 UTC m=+27.464959585 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.144777 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:47.144771625 +0000 UTC m=+27.464982375 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.211329 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-9s7rc"] Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.211763 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-9s7rc" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.214128 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.214410 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.214578 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.215272 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.226134 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.239848 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.245369 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f9ecb90c-92b0-4a1b-b7a2-21da0f41d594-host\") pod \"node-ca-9s7rc\" (UID: \"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\") " pod="openshift-image-registry/node-ca-9s7rc" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.245428 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxzcx\" (UniqueName: \"kubernetes.io/projected/f9ecb90c-92b0-4a1b-b7a2-21da0f41d594-kube-api-access-rxzcx\") pod \"node-ca-9s7rc\" (UID: \"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\") " pod="openshift-image-registry/node-ca-9s7rc" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.245607 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f9ecb90c-92b0-4a1b-b7a2-21da0f41d594-serviceca\") pod \"node-ca-9s7rc\" (UID: \"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\") " pod="openshift-image-registry/node-ca-9s7rc" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.250728 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.261571 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.277179 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: 
connection refused" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.288996 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kub
ernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.307294 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.317775 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.327956 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.341635 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\
\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54
319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.346217 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f9ecb90c-92b0-4a1b-b7a2-21da0f41d594-host\") pod \"node-ca-9s7rc\" (UID: \"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\") " pod="openshift-image-registry/node-ca-9s7rc" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.346257 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxzcx\" (UniqueName: \"kubernetes.io/projected/f9ecb90c-92b0-4a1b-b7a2-21da0f41d594-kube-api-access-rxzcx\") pod \"node-ca-9s7rc\" (UID: \"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\") " pod="openshift-image-registry/node-ca-9s7rc" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.346304 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f9ecb90c-92b0-4a1b-b7a2-21da0f41d594-serviceca\") pod \"node-ca-9s7rc\" (UID: \"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\") " pod="openshift-image-registry/node-ca-9s7rc" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.347136 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f9ecb90c-92b0-4a1b-b7a2-21da0f41d594-serviceca\") pod \"node-ca-9s7rc\" (UID: \"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\") " pod="openshift-image-registry/node-ca-9s7rc" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.347182 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f9ecb90c-92b0-4a1b-b7a2-21da0f41d594-host\") pod \"node-ca-9s7rc\" (UID: \"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\") " pod="openshift-image-registry/node-ca-9s7rc" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.359825 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.365002 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxzcx\" (UniqueName: \"kubernetes.io/projected/f9ecb90c-92b0-4a1b-b7a2-21da0f41d594-kube-api-access-rxzcx\") pod \"node-ca-9s7rc\" (UID: \"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\") " pod="openshift-image-registry/node-ca-9s7rc" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.368793 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.378292 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.380422 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.380542 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.380431 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:43 crc kubenswrapper[4682]: E1210 10:45:43.380780 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.389445 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10
T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.523474 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-9s7rc" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.529132 4682 generic.go:334] "Generic (PLEG): container finished" podID="19919360-1e01-4b1c-a2fe-d7b0f7b582c4" containerID="918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656" exitCode=0 Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.529216 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" event={"ID":"19919360-1e01-4b1c-a2fe-d7b0f7b582c4","Type":"ContainerDied","Data":"918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656"} Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.538692 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-xkwtt" event={"ID":"d9072c21-61ad-489f-8603-5f5699ad5d31","Type":"ContainerStarted","Data":"7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da"} Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.540872 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9"} Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.542843 4682 generic.go:334] "Generic (PLEG): container finished" podID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerID="e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5" exitCode=0 Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.542996 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerDied","Data":"e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5"} Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.546895 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c"} Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.546948 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26"} Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.550332 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zs6ss" event={"ID":"a005c959-3805-4e15-aa3a-7093815e03b8","Type":"ContainerStarted","Data":"18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873"} Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.550601 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.557322 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.578334 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.579349 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.592904 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.597044 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.597294 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.616589 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c85
7df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.641908 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.656175 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.674148 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.688961 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.708493 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.730450 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.752949 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.765817 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.778559 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.789707 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.801306 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly
\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.815924 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc 
kubenswrapper[4682]: I1210 10:45:43.835211 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"Po
dInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.868681 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.906246 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.925017 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.938873 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.950717 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.974114 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:43 crc kubenswrapper[4682]: I1210 10:45:43.987639 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:43Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4682]: I1210 10:45:44.003316 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4682]: I1210 10:45:44.015322 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4682]: I1210 10:45:44.026534 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4682]: I1210 10:45:44.040638 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"sta
rtedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4682]: I1210 10:45:44.051597 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:44 crc kubenswrapper[4682]: I1210 10:45:44.381010 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:44 crc kubenswrapper[4682]: E1210 10:45:44.381177 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:44 crc kubenswrapper[4682]: I1210 10:45:44.554675 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-9s7rc" event={"ID":"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594","Type":"ContainerStarted","Data":"d20baf772b7e421cc010af5d3ed72320e653e4aca23acc5d0f3e2f10f8b60208"} Dec 10 10:45:44 crc kubenswrapper[4682]: E1210 10:45:44.564616 4682 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"etcd-crc\" already exists" pod="openshift-etcd/etcd-crc" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.380626 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:45 crc kubenswrapper[4682]: E1210 10:45:45.381302 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.380713 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:45 crc kubenswrapper[4682]: E1210 10:45:45.381533 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.539618 4682 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.541830 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.541876 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.541888 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.542598 4682 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.561157 4682 generic.go:334] "Generic (PLEG): container finished" podID="19919360-1e01-4b1c-a2fe-d7b0f7b582c4" containerID="ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5" exitCode=0 Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.561256 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" event={"ID":"19919360-1e01-4b1c-a2fe-d7b0f7b582c4","Type":"ContainerDied","Data":"ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.563141 4682 kubelet_node_status.go:115] "Node was previously registered" node="crc" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.563301 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-9s7rc" event={"ID":"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594","Type":"ContainerStarted","Data":"a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.563399 4682 kubelet_node_status.go:79] "Successfully registered node" node="crc" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.564440 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.564495 4682 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.564507 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.564521 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.564532 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.573278 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerStarted","Data":"a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.573342 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerStarted","Data":"bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.573357 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerStarted","Data":"fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.573366 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerStarted","Data":"45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.573375 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerStarted","Data":"8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.573384 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerStarted","Data":"ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.582564 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.595103 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: E1210 10:45:45.595124 4682 kubelet_node_status.go:585] "Error 
updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256
:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"si
zeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":46317936
5},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.598893 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.598940 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.598952 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.598971 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.598983 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.607816 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: E1210 10:45:45.615264 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.618491 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.618523 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.618534 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.618551 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.618563 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.619522 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.631132 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: E1210 10:45:45.631168 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.636504 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.636545 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.636555 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.636568 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.636577 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4682]: E1210 10:45:45.648382 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.649775 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c
05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.651254 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.651279 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.651291 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.651308 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.651319 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.661819 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: E1210 10:45:45.664711 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1
e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: E1210 10:45:45.664817 4682 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.667869 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.667924 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.667935 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.667948 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.667957 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.674070 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.686127 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.697601 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"sta
rtedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.712740 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.725849 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.737630 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.752318 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.769764 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.769807 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.769816 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.769832 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.769842 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.773931 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0
cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.786775 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.797633 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.811705 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.831348 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.842914 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.853709 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.861590 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.872610 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.872643 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.872652 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.872667 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.872678 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.874255 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.886701 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.898018 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.913617 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-
10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.931958 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z 
is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.943288 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.957111 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.972166 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.975104 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.975144 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.975156 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.975173 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:45 crc kubenswrapper[4682]: I1210 10:45:45.975184 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:45Z","lastTransitionTime":"2025-12-10T10:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.078357 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.078403 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.078416 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.078436 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.078450 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.180735 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.180762 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.180770 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.180784 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.180794 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.283200 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.283238 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.283251 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.283267 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.283280 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.380371 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:46 crc kubenswrapper[4682]: E1210 10:45:46.380557 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.385636 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.385706 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.385725 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.385749 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.385765 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.489752 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.489798 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.489812 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.489833 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.489847 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.580610 4682 generic.go:334] "Generic (PLEG): container finished" podID="19919360-1e01-4b1c-a2fe-d7b0f7b582c4" containerID="8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9" exitCode=0 Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.581467 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" event={"ID":"19919360-1e01-4b1c-a2fe-d7b0f7b582c4","Type":"ContainerDied","Data":"8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9"} Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.592243 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.592289 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.592313 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.592338 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.592353 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.603420 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.619248 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.636407 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.650524 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.666762 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.686652 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z 
is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.694961 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.694996 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.695005 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.695026 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.695035 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.700027 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.713547 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.731254 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.747199 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.757940 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.770977 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.791147 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.797518 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.797545 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.797554 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.797568 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.797577 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.806848 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.820333 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.901151 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.901184 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.901193 4682 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.901207 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:46 crc kubenswrapper[4682]: I1210 10:45:46.901216 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:46Z","lastTransitionTime":"2025-12-10T10:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.004117 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.004170 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.004187 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.004213 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.004231 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.083342 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.083521 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:45:55.083493543 +0000 UTC m=+35.403704293 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.083638 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.083839 4682 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.083927 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:55.083903785 +0000 UTC m=+35.404114585 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.107591 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.107638 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.107650 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.107667 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.107712 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.185331 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.185423 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.185638 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.185705 4682 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.185821 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.185866 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.185891 4682 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.185920 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:55.185885638 +0000 UTC m=+35.506096428 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.185988 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2025-12-10 10:45:55.18595021 +0000 UTC m=+35.506161000 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.185772 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.186049 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.186091 4682 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.186164 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:55.186143816 +0000 UTC m=+35.506354606 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.210575 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.210666 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.210693 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.210728 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.210749 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.314035 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.314456 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.314466 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.314515 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.314533 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.381043 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.381043 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.381240 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:47 crc kubenswrapper[4682]: E1210 10:45:47.381313 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.416527 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.416578 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.416595 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.416613 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.416625 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.518954 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.518993 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.519004 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.519018 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.519027 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.587100 4682 generic.go:334] "Generic (PLEG): container finished" podID="19919360-1e01-4b1c-a2fe-d7b0f7b582c4" containerID="ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888" exitCode=0 Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.587180 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" event={"ID":"19919360-1e01-4b1c-a2fe-d7b0f7b582c4","Type":"ContainerDied","Data":"ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888"} Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.595283 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerStarted","Data":"115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5"} Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.621825 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.621878 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.621895 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.621912 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.621927 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.624576 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.640469 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.656528 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.670175 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.681927 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.698692 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.711930 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.724600 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.724641 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.724653 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.724670 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.724682 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.726920 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.742899 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.762608 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z 
is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.774147 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.786408 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.800573 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.812018 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.825133 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:47Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.826449 4682 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.826624 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.826709 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.826796 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.826877 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.929779 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.930136 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.930300 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.930424 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:47 crc kubenswrapper[4682]: I1210 10:45:47.930610 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:47Z","lastTransitionTime":"2025-12-10T10:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.032899 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.033355 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.033608 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.033790 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.033952 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.136754 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.136794 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.136808 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.136829 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.136843 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.239654 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.239763 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.239780 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.239802 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.239817 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.343269 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.343305 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.343319 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.343334 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.343347 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.380163 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:48 crc kubenswrapper[4682]: E1210 10:45:48.380424 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.446665 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.446728 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.446750 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.446799 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.446835 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.551121 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.551207 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.551293 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.551329 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.551355 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.605099 4682 generic.go:334] "Generic (PLEG): container finished" podID="19919360-1e01-4b1c-a2fe-d7b0f7b582c4" containerID="cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74" exitCode=0 Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.605152 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" event={"ID":"19919360-1e01-4b1c-a2fe-d7b0f7b582c4","Type":"ContainerDied","Data":"cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74"} Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.637228 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a
6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.655204 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.655245 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.655255 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.655274 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.655285 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.657172 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.672257 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.687675 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.699590 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.712556 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.725584 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.738773 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.751996 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.758083 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.758114 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.758125 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.758139 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.758148 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.765474 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\
\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.782277 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.798885 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z 
is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.811493 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.821784 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.832001 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:48Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.861067 4682 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.861101 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.861109 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.861123 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.861134 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.964155 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.964203 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.964217 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.964234 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:48 crc kubenswrapper[4682]: I1210 10:45:48.964245 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:48Z","lastTransitionTime":"2025-12-10T10:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.068050 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.068112 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.068133 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.068158 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.068175 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.171637 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.172141 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.172172 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.172197 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.172213 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.275277 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.275333 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.275352 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.275378 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.275392 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.378635 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.379113 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.379129 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.379149 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.379161 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.379892 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:49 crc kubenswrapper[4682]: E1210 10:45:49.379996 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.380312 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:49 crc kubenswrapper[4682]: E1210 10:45:49.380571 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.481963 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.482005 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.482013 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.482029 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.482040 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.585517 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.585575 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.585587 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.585611 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.585628 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.612761 4682 generic.go:334] "Generic (PLEG): container finished" podID="19919360-1e01-4b1c-a2fe-d7b0f7b582c4" containerID="4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc" exitCode=0 Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.613270 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" event={"ID":"19919360-1e01-4b1c-a2fe-d7b0f7b582c4","Type":"ContainerDied","Data":"4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc"} Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.619301 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerStarted","Data":"69ed951ac8d25a90467fbe458bd55e44b63655d4d75f47de6890b9940e79aa73"} Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.619656 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.641667 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c
05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.654416 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.658904 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.676880 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.691139 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc
-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.691273 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.691305 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.691313 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.691332 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.691343 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.703204 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.715504 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.727633 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.741902 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.756866 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.771543 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.785931 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad0
31bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.794698 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.794734 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.794747 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.794764 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.794777 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.805660 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z 
is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.819940 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.831936 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.842278 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.852007 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.874040 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.886443 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.897832 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.897880 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.897893 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.897913 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.897928 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:49Z","lastTransitionTime":"2025-12-10T10:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.901405 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.915845 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.934280 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://69ed951ac8d25a90467fbe458bd55e44b63655d4
d75f47de6890b9940e79aa73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.950504 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.965540 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:49 crc kubenswrapper[4682]: I1210 10:45:49.999709 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:49Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.000609 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.000650 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.000663 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.000680 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.000691 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.030328 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.047285 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.060543 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.082344 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.096223 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.103014 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.103050 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.103058 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.103073 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.103085 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.111537 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.205057 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 
10:45:50.205110 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.205121 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.205143 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.205157 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.307450 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.307524 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.307539 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.307558 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.307571 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.380438 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:50 crc kubenswrapper[4682]: E1210 10:45:50.380619 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.397387 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.410712 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.410764 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.410776 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.410795 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.410808 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.417679 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.437729 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.453363 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.469645 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.490650 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.512163 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.513308 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.513353 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.513370 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.513387 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.513401 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.531532 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.552559 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://69ed951ac8d25a90467fbe458bd55e44b63655d4
d75f47de6890b9940e79aa73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.569409 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.582695 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.598624 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.616614 4682 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.616675 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.616693 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.616712 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.616726 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.623258 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\
"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state
\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.628259 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" event={"ID":"19919360-1e01-4b1c-a2fe-d7b0f7b582c4","Type":"ContainerStarted","Data":"5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843"} Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.628315 4682 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.628690 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.637190 4682 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.651247 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.657092 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.665826 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.678525 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.690827 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.700210 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.716874 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\
"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-sock
et\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://69ed951ac8d25a90467fbe458bd55e44b63655d4d75f47de6890b9940e79aa73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mou
ntPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.718599 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.718619 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.718628 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.718642 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.718651 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.729180 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.741444 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.752685 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.768341 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.780715 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-
apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.792047 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.808839 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.820868 4682 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.820914 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.820927 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.820944 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.820953 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.831873 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\
"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state
\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.846150 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.858451 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.873685 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.884539 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.895807 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.918105 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.922998 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.923036 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.923047 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.923064 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.923076 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:50Z","lastTransitionTime":"2025-12-10T10:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.930941 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.947038 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.966120 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.981688 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:50 crc kubenswrapper[4682]: I1210 10:45:50.995942 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.007295 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.026081 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.026127 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.026143 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.026166 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.026182 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.026197 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servic
eaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://69ed951ac8d25a90467fbe458bd55e44b63655d4d75f47de6890b9940e79aa73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/l
ib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.042218 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.057538 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.074073 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.097638 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:45:51Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.128978 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.129023 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.129034 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.129053 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.129067 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.232019 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.232076 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.232086 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.232105 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.232117 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.334655 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.334698 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.334708 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.334723 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.334735 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.380598 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:51 crc kubenswrapper[4682]: E1210 10:45:51.380705 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.380707 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:51 crc kubenswrapper[4682]: E1210 10:45:51.380961 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.437679 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.437723 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.437743 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.437762 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.437778 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.540606 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.540787 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.540810 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.540832 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.540847 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.631754 4682 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.643815 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.643867 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.643879 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.643904 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.643918 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.747006 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.747045 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.747053 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.747068 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.747080 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.848881 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.848922 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.848933 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.848951 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.848963 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.950959 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.950992 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.951002 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.951015 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:51 crc kubenswrapper[4682]: I1210 10:45:51.951023 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:51Z","lastTransitionTime":"2025-12-10T10:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.053610 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.053656 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.053667 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.053682 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.053694 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.156143 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.156183 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.156219 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.156246 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.156259 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.258854 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.258895 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.258909 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.258926 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.258939 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.361739 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.361794 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.361806 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.361822 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.361832 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.380356 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:52 crc kubenswrapper[4682]: E1210 10:45:52.380583 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.465272 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.465330 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.465342 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.465363 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.465375 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.567743 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.567777 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.567787 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.567800 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.567809 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.637241 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/0.log" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.641017 4682 generic.go:334] "Generic (PLEG): container finished" podID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerID="69ed951ac8d25a90467fbe458bd55e44b63655d4d75f47de6890b9940e79aa73" exitCode=1 Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.641081 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerDied","Data":"69ed951ac8d25a90467fbe458bd55e44b63655d4d75f47de6890b9940e79aa73"} Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.642226 4682 scope.go:117] "RemoveContainer" containerID="69ed951ac8d25a90467fbe458bd55e44b63655d4d75f47de6890b9940e79aa73" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.657401 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.671345 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.671384 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.671397 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.671415 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.671426 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.680386 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.695738 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.713084 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.740191 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://69ed951ac8d25a90467fbe458bd55e44b63655d4d75f47de6890b9940e79aa73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69ed951ac8d25a90467fbe458bd55e44b63655d4d75f47de6890b9940e79aa73\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:52Z\\\",\\\"message\\\":\\\" *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:45:52.245819 5998 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 10:45:52.245784 5998 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1210 10:45:52.245837 5998 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1210 10:45:52.245844 5998 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 10:45:52.245858 5998 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 10:45:52.245890 5998 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1210 10:45:52.245905 5998 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1210 10:45:52.245780 5998 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:45:52.245919 5998 factory.go:656] Stopping watch factory\\\\nI1210 10:45:52.245931 5998 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1210 10:45:52.245938 5998 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 10:45:52.245943 5998 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1210 10:45:52.245903 5998 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1210 10:45:52.245952 5998 handler.go:208] Removed *v1.Pod 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.758410 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.771021 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.773820 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.773863 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.773875 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.773891 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.774323 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.787220 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.811361 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"re
ady\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\
\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.827710 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.840793 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.853105 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc
-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.864629 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.876557 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.876605 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.876619 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.876637 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.876649 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.878407 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.890016 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:52Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.979640 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.979673 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.979681 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.979695 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:52 crc kubenswrapper[4682]: I1210 10:45:52.979704 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:52Z","lastTransitionTime":"2025-12-10T10:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.081754 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.081791 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.081800 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.081814 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.081823 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.184253 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.184297 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.184309 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.184326 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.184337 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.286533 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.286576 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.286588 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.286607 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.286619 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.380890 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.380903 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:53 crc kubenswrapper[4682]: E1210 10:45:53.381019 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:53 crc kubenswrapper[4682]: E1210 10:45:53.381197 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.388591 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.388627 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.388649 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.388670 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.388691 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.492019 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.492083 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.492102 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.492128 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.492146 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.595719 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.595790 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.595807 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.595837 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.595860 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.648408 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/0.log" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.652609 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerStarted","Data":"36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8"} Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.652746 4682 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.667625 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.688512 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c
05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.698838 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.698899 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.698916 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.698938 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.698953 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.705456 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.718309 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.732381 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.747067 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.762453 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.776068 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.789907 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.801211 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.801258 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.801272 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.801292 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.801306 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.807322 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:
45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.828612 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4f
abe55a0cb381348986f90ea8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69ed951ac8d25a90467fbe458bd55e44b63655d4d75f47de6890b9940e79aa73\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:52Z\\\",\\\"message\\\":\\\" *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:45:52.245819 5998 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 10:45:52.245784 5998 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1210 10:45:52.245837 5998 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1210 10:45:52.245844 5998 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 10:45:52.245858 5998 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 10:45:52.245890 5998 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1210 10:45:52.245905 5998 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1210 10:45:52.245780 5998 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:45:52.245919 5998 factory.go:656] Stopping watch factory\\\\nI1210 10:45:52.245931 5998 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1210 10:45:52.245938 5998 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 10:45:52.245943 5998 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1210 10:45:52.245903 5998 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1210 10:45:52.245952 5998 handler.go:208] Removed *v1.Pod 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.842847 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.854989 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.871181 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.884634 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.903146 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.903179 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.903190 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.903206 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:53 crc kubenswrapper[4682]: I1210 10:45:53.903218 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:53Z","lastTransitionTime":"2025-12-10T10:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.005856 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.005885 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.005893 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.005905 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.005914 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.108741 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.108811 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.108821 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.108838 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.108855 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.130033 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p"] Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.130466 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.132033 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.132495 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.141628 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.152573 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.159699 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbkp4\" (UniqueName: \"kubernetes.io/projected/b0490123-88b1-4c35-ad45-3cf66d5d26e6-kube-api-access-dbkp4\") pod \"ovnkube-control-plane-749d76644c-wxh8p\" (UID: \"b0490123-88b1-4c35-ad45-3cf66d5d26e6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.159751 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b0490123-88b1-4c35-ad45-3cf66d5d26e6-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-wxh8p\" (UID: \"b0490123-88b1-4c35-ad45-3cf66d5d26e6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.159779 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b0490123-88b1-4c35-ad45-3cf66d5d26e6-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-wxh8p\" (UID: \"b0490123-88b1-4c35-ad45-3cf66d5d26e6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.159908 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b0490123-88b1-4c35-ad45-3cf66d5d26e6-env-overrides\") pod \"ovnkube-control-plane-749d76644c-wxh8p\" (UID: \"b0490123-88b1-4c35-ad45-3cf66d5d26e6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.164071 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.181843 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.198340 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.210749 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.210775 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.210783 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.210797 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.210806 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.217113 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69ed951ac8d25a90467fbe458bd55e44b63655d4d75f47de6890b9940e79aa73\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:52Z\\\",\\\"message\\\":\\\" *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:45:52.245819 5998 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 10:45:52.245784 5998 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1210 10:45:52.245837 5998 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1210 10:45:52.245844 5998 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 10:45:52.245858 5998 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 10:45:52.245890 5998 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1210 10:45:52.245905 5998 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1210 10:45:52.245780 5998 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:45:52.245919 5998 factory.go:656] Stopping watch factory\\\\nI1210 10:45:52.245931 5998 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1210 10:45:52.245938 5998 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 10:45:52.245943 5998 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1210 10:45:52.245903 5998 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1210 10:45:52.245952 5998 handler.go:208] Removed *v1.Pod 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.235458 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.256726 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.261086 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b0490123-88b1-4c35-ad45-3cf66d5d26e6-env-overrides\") pod \"ovnkube-control-plane-749d76644c-wxh8p\" (UID: \"b0490123-88b1-4c35-ad45-3cf66d5d26e6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.261241 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b0490123-88b1-4c35-ad45-3cf66d5d26e6-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-wxh8p\" (UID: \"b0490123-88b1-4c35-ad45-3cf66d5d26e6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.261296 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbkp4\" (UniqueName: \"kubernetes.io/projected/b0490123-88b1-4c35-ad45-3cf66d5d26e6-kube-api-access-dbkp4\") pod \"ovnkube-control-plane-749d76644c-wxh8p\" (UID: \"b0490123-88b1-4c35-ad45-3cf66d5d26e6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.261379 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b0490123-88b1-4c35-ad45-3cf66d5d26e6-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-wxh8p\" (UID: \"b0490123-88b1-4c35-ad45-3cf66d5d26e6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.262221 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b0490123-88b1-4c35-ad45-3cf66d5d26e6-env-overrides\") pod \"ovnkube-control-plane-749d76644c-wxh8p\" (UID: \"b0490123-88b1-4c35-ad45-3cf66d5d26e6\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.262298 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b0490123-88b1-4c35-ad45-3cf66d5d26e6-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-wxh8p\" (UID: \"b0490123-88b1-4c35-ad45-3cf66d5d26e6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.269018 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b0490123-88b1-4c35-ad45-3cf66d5d26e6-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-wxh8p\" (UID: \"b0490123-88b1-4c35-ad45-3cf66d5d26e6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.276551 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.278794 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbkp4\" (UniqueName: \"kubernetes.io/projected/b0490123-88b1-4c35-ad45-3cf66d5d26e6-kube-api-access-dbkp4\") pod 
\"ovnkube-control-plane-749d76644c-wxh8p\" (UID: \"b0490123-88b1-4c35-ad45-3cf66d5d26e6\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.295266 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.312414 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-r
esources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery 
information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.313763 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.313828 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.313847 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.313871 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.313887 4682 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.324769 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.336184 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.355390 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.369120 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.380185 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:54 crc kubenswrapper[4682]: E1210 10:45:54.380322 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.382606 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.417213 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.417271 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.417283 4682 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.417306 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.417322 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.443321 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" Dec 10 10:45:54 crc kubenswrapper[4682]: W1210 10:45:54.468192 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0490123_88b1_4c35_ad45_3cf66d5d26e6.slice/crio-548af47c4bfce976423f86109eb1c5ac9811de91d3786fd8308c0a152368d34d WatchSource:0}: Error finding container 548af47c4bfce976423f86109eb1c5ac9811de91d3786fd8308c0a152368d34d: Status 404 returned error can't find the container with id 548af47c4bfce976423f86109eb1c5ac9811de91d3786fd8308c0a152368d34d Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.521237 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.521550 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.521654 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.521750 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.521870 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.626260 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.626722 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.626907 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.627081 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.627248 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.657821 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/1.log" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.658614 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/0.log" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.662220 4682 generic.go:334] "Generic (PLEG): container finished" podID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerID="36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8" exitCode=1 Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.662353 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerDied","Data":"36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8"} Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.662526 4682 scope.go:117] "RemoveContainer" containerID="69ed951ac8d25a90467fbe458bd55e44b63655d4d75f47de6890b9940e79aa73" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.662871 4682 scope.go:117] "RemoveContainer" containerID="36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8" Dec 10 10:45:54 crc kubenswrapper[4682]: E1210 10:45:54.663040 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.664031 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" event={"ID":"b0490123-88b1-4c35-ad45-3cf66d5d26e6","Type":"ContainerStarted","Data":"548af47c4bfce976423f86109eb1c5ac9811de91d3786fd8308c0a152368d34d"} Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.683117 4682 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.699765 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.718208 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.730415 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.730546 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.730560 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.730579 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.730592 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.742200 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4f
abe55a0cb381348986f90ea8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69ed951ac8d25a90467fbe458bd55e44b63655d4d75f47de6890b9940e79aa73\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:52Z\\\",\\\"message\\\":\\\" *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:45:52.245819 5998 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 10:45:52.245784 5998 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1210 10:45:52.245837 5998 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1210 10:45:52.245844 5998 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 10:45:52.245858 5998 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 10:45:52.245890 5998 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1210 10:45:52.245905 5998 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1210 10:45:52.245780 5998 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:45:52.245919 5998 factory.go:656] Stopping watch factory\\\\nI1210 10:45:52.245931 5998 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1210 10:45:52.245938 5998 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 10:45:52.245943 5998 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1210 10:45:52.245903 5998 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1210 10:45:52.245952 5998 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"-config-operator/machine-config-daemon-58skk\\\\nF1210 10:45:53.361609 6146 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z]\\\\nI1210 10:45:53.361614 6146 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1210 10:45:53.361620 6146 lb_config.go:1031] Cluster endpoints for openshift-route-controller-manager/route-controller-manager for network=default are: map[]\\\\nI1210 
10:45:53.361622 6146 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-58skk\\\\nI1210 10:45:53.361614 6146 services_control\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\"
:\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.753779 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.767993 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.780045 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.796381 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.809088 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.825436 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.833223 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.833280 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.833296 4682 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.833318 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.833332 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.848520 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/
lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"conta
inerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.871457 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.894120 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.911445 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.925845 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.944772 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.945162 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.945240 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.945335 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.945404 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:54Z","lastTransitionTime":"2025-12-10T10:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:54 crc kubenswrapper[4682]: I1210 10:45:54.955061 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:54Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.048754 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.048815 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.048830 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.048854 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.048872 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.151897 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.152273 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.152340 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.152411 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.152499 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.173636 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.173752 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.173885 4682 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.173951 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:11.173931073 +0000 UTC m=+51.494141823 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.174258 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:46:11.174242003 +0000 UTC m=+51.494452763 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.255071 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.255139 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.255154 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.255177 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.255193 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.274713 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.274771 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.274827 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.275005 4682 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.275105 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-12-10 10:46:11.27508522 +0000 UTC m=+51.595295970 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.275013 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.275157 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.275171 4682 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.275235 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:11.275216483 +0000 UTC m=+51.595427233 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.275625 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.275684 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.275708 4682 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.275809 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:11.275776091 +0000 UTC m=+51.595986871 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.357971 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.358027 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.358039 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.358058 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.358068 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.380816 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.380904 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.381031 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.381088 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.462059 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.462117 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.462129 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.462148 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.462165 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.564640 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.564707 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.564723 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.564751 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.564765 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.596114 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-6c5qg"] Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.596792 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.596888 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.610895 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.629046 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.645635 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.667779 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.667831 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.667842 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.667859 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.667871 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.669105 4682 scope.go:117] "RemoveContainer" containerID="36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8" Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.669290 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.677953 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4f
abe55a0cb381348986f90ea8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://69ed951ac8d25a90467fbe458bd55e44b63655d4d75f47de6890b9940e79aa73\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:52Z\\\",\\\"message\\\":\\\" *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:45:52.245819 5998 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 10:45:52.245784 5998 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1210 10:45:52.245837 5998 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1210 10:45:52.245844 5998 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 10:45:52.245858 5998 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 10:45:52.245890 5998 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1210 10:45:52.245905 5998 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1210 10:45:52.245780 5998 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:45:52.245919 5998 factory.go:656] Stopping watch factory\\\\nI1210 10:45:52.245931 5998 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1210 10:45:52.245938 5998 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 10:45:52.245943 5998 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1210 10:45:52.245903 5998 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1210 10:45:52.245952 5998 handler.go:208] Removed *v1.Pod ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"-config-operator/machine-config-daemon-58skk\\\\nF1210 10:45:53.361609 6146 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z]\\\\nI1210 10:45:53.361614 6146 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1210 10:45:53.361620 6146 lb_config.go:1031] Cluster endpoints for openshift-route-controller-manager/route-controller-manager for network=default are: map[]\\\\nI1210 
10:45:53.361622 6146 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-58skk\\\\nI1210 10:45:53.361614 6146 services_control\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\"
:\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.679009 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g94xk\" (UniqueName: \"kubernetes.io/projected/f308e36d-4856-4306-adec-390e40daaee3-kube-api-access-g94xk\") pod \"network-metrics-daemon-6c5qg\" (UID: \"f308e36d-4856-4306-adec-390e40daaee3\") " pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.679084 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs\") pod \"network-metrics-daemon-6c5qg\" (UID: \"f308e36d-4856-4306-adec-390e40daaee3\") " pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.687694 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.687749 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.687783 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.687814 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.687828 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.692989 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.704921 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.711631 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.712094 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.712139 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.712155 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.712175 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.712188 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.727790 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.728158 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.733297 4682 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.733375 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.733391 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.733416 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.733435 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.746765 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.749427 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.753657 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.753689 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.753702 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.753722 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.753736 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.763396 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.768939 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.779377 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.779787 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.779933 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.780030 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.780113 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.780055 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g94xk\" (UniqueName: \"kubernetes.io/projected/f308e36d-4856-4306-adec-390e40daaee3-kube-api-access-g94xk\") pod \"network-metrics-daemon-6c5qg\" (UID: \"f308e36d-4856-4306-adec-390e40daaee3\") " pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.780550 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs\") pod \"network-metrics-daemon-6c5qg\" (UID: \"f308e36d-4856-4306-adec-390e40daaee3\") " pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.780821 4682 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.780969 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs podName:f308e36d-4856-4306-adec-390e40daaee3 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:56.280932949 +0000 UTC m=+36.601143879 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs") pod "network-metrics-daemon-6c5qg" (UID: "f308e36d-4856-4306-adec-390e40daaee3") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.782025 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.796668 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: E1210 10:45:55.796798 4682 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.798961 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.798993 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.799005 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.799023 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.798993 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.799038 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.800762 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g94xk\" (UniqueName: \"kubernetes.io/projected/f308e36d-4856-4306-adec-390e40daaee3-kube-api-access-g94xk\") pod \"network-metrics-daemon-6c5qg\" (UID: \"f308e36d-4856-4306-adec-390e40daaee3\") " pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.820693 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b9009227
2e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\
\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.835153 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.850323 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.865404 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.876613 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.889695 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.901860 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.902047 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.902173 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.902274 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.902554 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:55Z","lastTransitionTime":"2025-12-10T10:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.907442 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.933365 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.947065 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.966664 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.983542 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:55 crc kubenswrapper[4682]: I1210 10:45:55.998423 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:55Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.004917 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.005208 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.005272 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.005345 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.005436 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.012699 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.027525 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.040784 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.057193 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.073695 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.087769 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.103700 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.107575 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.107618 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.107627 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.107641 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.107651 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.119112 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\
\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.141613 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.169551 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"-config-operator/machine-config-daemon-58skk\\\\nF1210 10:45:53.361609 6146 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z]\\\\nI1210 10:45:53.361614 6146 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1210 10:45:53.361620 6146 lb_config.go:1031] Cluster endpoints for openshift-route-controller-manager/route-controller-manager for network=default are: map[]\\\\nI1210 10:45:53.361622 6146 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-58skk\\\\nI1210 10:45:53.361614 6146 services_control\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.183735 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.210655 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.210695 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.210704 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.210720 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.210729 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.286086 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs\") pod \"network-metrics-daemon-6c5qg\" (UID: \"f308e36d-4856-4306-adec-390e40daaee3\") " pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:45:56 crc kubenswrapper[4682]: E1210 10:45:56.286266 4682 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:56 crc kubenswrapper[4682]: E1210 10:45:56.286324 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs podName:f308e36d-4856-4306-adec-390e40daaee3 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:57.286309502 +0000 UTC m=+37.606520252 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs") pod "network-metrics-daemon-6c5qg" (UID: "f308e36d-4856-4306-adec-390e40daaee3") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.313607 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.313672 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.313686 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.313714 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.313730 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.380584 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:56 crc kubenswrapper[4682]: E1210 10:45:56.380717 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.416533 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.416586 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.416607 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.416639 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.416659 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.519507 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.519749 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.519806 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.519914 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.519979 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.623204 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.623558 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.623630 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.623694 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.623759 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.673174 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" event={"ID":"b0490123-88b1-4c35-ad45-3cf66d5d26e6","Type":"ContainerStarted","Data":"2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60"} Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.673252 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" event={"ID":"b0490123-88b1-4c35-ad45-3cf66d5d26e6","Type":"ContainerStarted","Data":"7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f"} Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.674877 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/1.log" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.689245 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.703034 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.716013 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.726437 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.726717 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.726797 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.726885 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.726955 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.729553 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.743297 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.759252 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.772099 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.792141 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.817637 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"-config-operator/machine-config-daemon-58skk\\\\nF1210 10:45:53.361609 6146 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z]\\\\nI1210 10:45:53.361614 6146 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1210 10:45:53.361620 6146 lb_config.go:1031] Cluster endpoints for openshift-route-controller-manager/route-controller-manager for network=default are: map[]\\\\nI1210 10:45:53.361622 6146 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-58skk\\\\nI1210 10:45:53.361614 6146 services_control\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.830370 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.830435 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.830453 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.830501 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.830523 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.833645 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.846108 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.861664 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.879883 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.897641 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.913946 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.931720 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.934133 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.934190 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.934203 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.934225 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.934238 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:56Z","lastTransitionTime":"2025-12-10T10:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:56 crc kubenswrapper[4682]: I1210 10:45:56.964121 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\
\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":
\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:56Z is after 2025-08-24T17:21:41Z" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.036937 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.036997 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.037010 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.037036 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.037058 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.140219 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.140278 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.140318 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.140361 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.140380 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.243187 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.243265 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.243289 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.243317 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.243330 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.297299 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs\") pod \"network-metrics-daemon-6c5qg\" (UID: \"f308e36d-4856-4306-adec-390e40daaee3\") " pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:45:57 crc kubenswrapper[4682]: E1210 10:45:57.297594 4682 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:57 crc kubenswrapper[4682]: E1210 10:45:57.297708 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs podName:f308e36d-4856-4306-adec-390e40daaee3 nodeName:}" failed. No retries permitted until 2025-12-10 10:45:59.297677359 +0000 UTC m=+39.617888149 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs") pod "network-metrics-daemon-6c5qg" (UID: "f308e36d-4856-4306-adec-390e40daaee3") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.346269 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.346304 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.346315 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.346330 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.346341 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.380318 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.380327 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:57 crc kubenswrapper[4682]: E1210 10:45:57.380446 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.380350 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:45:57 crc kubenswrapper[4682]: E1210 10:45:57.380541 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:57 crc kubenswrapper[4682]: E1210 10:45:57.380826 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.449165 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.449209 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.449220 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.449236 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.449245 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.552148 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.552208 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.552223 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.552247 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.552265 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.654950 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.655033 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.655056 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.655089 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.655148 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.757966 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.758013 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.758023 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.758038 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.758047 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.860699 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.860741 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.860752 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.860774 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.860797 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.964070 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.964112 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.964124 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.964142 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:57 crc kubenswrapper[4682]: I1210 10:45:57.964157 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:57Z","lastTransitionTime":"2025-12-10T10:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.066582 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.066943 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.067023 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.067116 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.067205 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.171034 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.171091 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.171100 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.171116 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.171128 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.274224 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.274379 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.274407 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.274448 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.274523 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.377005 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.377321 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.377410 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.377496 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.377575 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.380638 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:45:58 crc kubenswrapper[4682]: E1210 10:45:58.380826 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.480383 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.480448 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.480502 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.480529 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.480548 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.584299 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.584353 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.584367 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.584393 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.584410 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.686736 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.686798 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.686810 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.686831 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.686842 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.789640 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.789692 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.789709 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.789729 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.789740 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.892910 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.892975 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.892992 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.893017 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.893035 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.995542 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.995898 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.995935 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.995965 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:58 crc kubenswrapper[4682]: I1210 10:45:58.995986 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:58Z","lastTransitionTime":"2025-12-10T10:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.099526 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.099576 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.099584 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.099600 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.099616 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.202316 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.202385 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.202399 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.202415 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.202427 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.305877 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.305961 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.305983 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.306015 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.306053 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.318896 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs\") pod \"network-metrics-daemon-6c5qg\" (UID: \"f308e36d-4856-4306-adec-390e40daaee3\") " pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:45:59 crc kubenswrapper[4682]: E1210 10:45:59.319104 4682 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:59 crc kubenswrapper[4682]: E1210 10:45:59.319214 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs podName:f308e36d-4856-4306-adec-390e40daaee3 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:03.319182035 +0000 UTC m=+43.639392825 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs") pod "network-metrics-daemon-6c5qg" (UID: "f308e36d-4856-4306-adec-390e40daaee3") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.380721 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.380772 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.380785 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:45:59 crc kubenswrapper[4682]: E1210 10:45:59.380905 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:45:59 crc kubenswrapper[4682]: E1210 10:45:59.381017 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:45:59 crc kubenswrapper[4682]: E1210 10:45:59.381103 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.409034 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.409099 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.409121 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.409145 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.409163 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.512360 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.512425 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.512437 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.512457 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.512500 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.616217 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.616283 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.616301 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.616326 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.616345 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.718308 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.718353 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.718363 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.718378 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.718390 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.820511 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.820567 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.820585 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.820603 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.820614 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.902590 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.903654 4682 scope.go:117] "RemoveContainer" containerID="36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8" Dec 10 10:45:59 crc kubenswrapper[4682]: E1210 10:45:59.903883 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.923526 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.923572 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.923588 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.923608 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:45:59 crc kubenswrapper[4682]: I1210 10:45:59.923621 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:45:59Z","lastTransitionTime":"2025-12-10T10:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.027982 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.028067 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.028088 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.028117 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.028149 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.131847 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.132211 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.132354 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.132453 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.132569 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.236174 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.236574 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.236721 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.236909 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.237040 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.340791 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.340842 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.340852 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.340872 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.340885 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.380929 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:00 crc kubenswrapper[4682]: E1210 10:46:00.381057 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.409292 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{
\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\
\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.427121 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.443819 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.443874 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.443888 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.443912 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.443928 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.444702 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.459299 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.479617 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.490888 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.508555 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.513746 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.519326 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.531549 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.543574 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.546307 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.546346 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.546355 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.546371 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.546383 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.555574 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.570464 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.584692 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.603992 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"-config-operator/machine-config-daemon-58skk\\\\nF1210 10:45:53.361609 6146 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z]\\\\nI1210 10:45:53.361614 6146 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1210 10:45:53.361620 6146 lb_config.go:1031] Cluster endpoints for openshift-route-controller-manager/route-controller-manager for network=default are: map[]\\\\nI1210 10:45:53.361622 6146 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-58skk\\\\nI1210 10:45:53.361614 6146 services_control\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.619797 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.631438 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.645039 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.647868 4682 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.647903 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.647913 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.647928 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.647938 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:00Z","lastTransitionTime":"2025-12-10T10:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.666647 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 
10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.675937 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.684992 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.707760 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:00 crc kubenswrapper[4682]: I1210 10:46:00.721016 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.380500 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.381344 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:01 crc kubenswrapper[4682]: E1210 10:46:01.381485 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:01 crc kubenswrapper[4682]: E1210 10:46:01.381597 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.381639 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:01 crc kubenswrapper[4682]: E1210 10:46:01.381851 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.505563 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.505649 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.505679 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.505717 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.505740 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.507790 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"las
tState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.524166 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:01Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.543991 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:01Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.561314 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-12-10T10:46:01Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.577084 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:01Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.589298 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:01Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.603725 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:01Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.608764 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.608798 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.608810 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.608827 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.608838 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.619939 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:01Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.633251 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:01Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.646531 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:01Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.671354 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:46:01Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.699198 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"-config-operator/machine-config-daemon-58skk\\\\nF1210 10:45:53.361609 6146 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z]\\\\nI1210 10:45:53.361614 6146 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1210 10:45:53.361620 6146 lb_config.go:1031] Cluster endpoints for openshift-route-controller-manager/route-controller-manager for network=default are: map[]\\\\nI1210 10:45:53.361622 6146 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-58skk\\\\nI1210 10:45:53.361614 6146 services_control\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:01Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.711392 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.711446 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.711462 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.711506 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.711522 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.815047 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.815116 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.815137 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.815164 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.815186 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.918280 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.918349 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.918374 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.918408 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:01 crc kubenswrapper[4682]: I1210 10:46:01.918431 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:01Z","lastTransitionTime":"2025-12-10T10:46:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.021274 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.021387 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.021429 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.021452 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.021487 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.124620 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.124682 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.124694 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.124719 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.124734 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.227799 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.227858 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.227868 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.227886 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.227900 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.331801 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.331867 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.331886 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.331909 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.331928 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.380414 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:02 crc kubenswrapper[4682]: E1210 10:46:02.380568 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.435153 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.435234 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.435257 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.435294 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.435318 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.541764 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.541841 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.541856 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.541879 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.541893 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.644950 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.645012 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.645028 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.645071 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.645090 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.747847 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.747914 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.747928 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.747951 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.747965 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.850852 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.850927 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.850941 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.850963 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.850978 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.953366 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.953417 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.953433 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.953459 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:02 crc kubenswrapper[4682]: I1210 10:46:02.953515 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:02Z","lastTransitionTime":"2025-12-10T10:46:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.055789 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.055828 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.055837 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.055856 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.055867 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.159005 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.159069 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.159086 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.159112 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.159130 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.262692 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.262786 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.262798 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.262824 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.262841 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.359151 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs\") pod \"network-metrics-daemon-6c5qg\" (UID: \"f308e36d-4856-4306-adec-390e40daaee3\") " pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:03 crc kubenswrapper[4682]: E1210 10:46:03.359343 4682 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:46:03 crc kubenswrapper[4682]: E1210 10:46:03.359436 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs podName:f308e36d-4856-4306-adec-390e40daaee3 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:11.35941078 +0000 UTC m=+51.679621570 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs") pod "network-metrics-daemon-6c5qg" (UID: "f308e36d-4856-4306-adec-390e40daaee3") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.366677 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.366737 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.366755 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.366778 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.366793 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.382487 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.382540 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:03 crc kubenswrapper[4682]: E1210 10:46:03.382641 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.382494 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:03 crc kubenswrapper[4682]: E1210 10:46:03.382867 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:03 crc kubenswrapper[4682]: E1210 10:46:03.383085 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.470444 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.470534 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.470557 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.470589 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.470609 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.573658 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.573720 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.573733 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.573754 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.573767 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.678378 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.678579 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.678676 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.678717 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.678744 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.781627 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.781681 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.781690 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.781708 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.781717 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.885006 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.885056 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.885068 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.885088 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.885102 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.988380 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.988451 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.988509 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.988538 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:03 crc kubenswrapper[4682]: I1210 10:46:03.988559 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:03Z","lastTransitionTime":"2025-12-10T10:46:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.091206 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.091256 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.091267 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.091283 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.091293 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.194075 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.194126 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.194134 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.194149 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.194161 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.297589 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.297657 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.297672 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.297697 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.297721 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.380509 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:04 crc kubenswrapper[4682]: E1210 10:46:04.380661 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.400763 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.400810 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.400820 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.400837 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.400850 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.504509 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.504555 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.504568 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.504607 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.504622 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.607659 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.607714 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.607732 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.607754 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.607770 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.709620 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.709664 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.709676 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.709710 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.709724 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.812243 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.812285 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.812296 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.812312 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.812324 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.916807 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.916880 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.916891 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.916910 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:04 crc kubenswrapper[4682]: I1210 10:46:04.916922 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:04Z","lastTransitionTime":"2025-12-10T10:46:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.020719 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.020752 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.020760 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.020773 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.020782 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.124169 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.124241 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.124265 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.124295 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.124318 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.228277 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.228337 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.228354 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.228376 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.228393 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.330893 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.330959 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.330974 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.331276 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.331315 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.380779 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.380779 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:05 crc kubenswrapper[4682]: E1210 10:46:05.381048 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.381071 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:05 crc kubenswrapper[4682]: E1210 10:46:05.381181 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:05 crc kubenswrapper[4682]: E1210 10:46:05.381333 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.434849 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.434908 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.434927 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.434952 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.434987 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.537955 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.538009 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.538035 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.538065 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.538088 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.641420 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.641533 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.641572 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.641601 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.641620 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.745550 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.745666 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.745679 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.745705 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.745719 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.805219 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.805278 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.805290 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.805304 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.805314 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4682]: E1210 10:46:05.819190 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:05Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.823562 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.823600 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.823611 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.823628 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.823639 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4682]: E1210 10:46:05.836649 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:05Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.840193 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.840248 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.840264 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.840283 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.840295 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4682]: E1210 10:46:05.855606 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:05Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.860518 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.860593 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.860613 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.860634 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.860647 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4682]: E1210 10:46:05.879666 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:05Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.884637 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.884694 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.884710 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.884735 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.884751 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:05 crc kubenswrapper[4682]: E1210 10:46:05.904722 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:05Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:05 crc kubenswrapper[4682]: E1210 10:46:05.904977 4682 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.907452 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.907531 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.907552 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.907576 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:05 crc kubenswrapper[4682]: I1210 10:46:05.907590 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:05Z","lastTransitionTime":"2025-12-10T10:46:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.010983 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.011075 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.011100 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.011136 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.011159 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.114220 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.114305 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.114318 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.114354 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.114366 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.216928 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.216999 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.217012 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.217034 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.217050 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.320230 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.320298 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.320316 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.320344 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.320365 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.380764 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:06 crc kubenswrapper[4682]: E1210 10:46:06.380918 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.422935 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.422984 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.422996 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.423014 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.423026 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.525513 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.525560 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.525593 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.525606 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.525616 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.628444 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.628546 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.628565 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.629079 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.629138 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.732082 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.732140 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.732158 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.732182 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.732200 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.835158 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.835224 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.835238 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.835254 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.835266 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.937843 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.937902 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.937919 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.937943 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:06 crc kubenswrapper[4682]: I1210 10:46:06.937958 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:06Z","lastTransitionTime":"2025-12-10T10:46:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.043148 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.043197 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.043208 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.043221 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.043231 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.146247 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.146303 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.146315 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.146334 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.146346 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.249399 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.249452 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.249465 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.249505 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.249518 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.352607 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.352688 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.352729 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.352776 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.352802 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.380603 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.380603 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:07 crc kubenswrapper[4682]: E1210 10:46:07.380774 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:07 crc kubenswrapper[4682]: E1210 10:46:07.380871 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.380603 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:07 crc kubenswrapper[4682]: E1210 10:46:07.380997 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.456513 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.456576 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.456591 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.456617 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.456632 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.559405 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.559451 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.559460 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.559548 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.559566 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.662735 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.662783 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.662793 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.662808 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.662820 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.765423 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.765494 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.765515 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.765536 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.765548 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.868914 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.868963 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.868973 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.868989 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.869001 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.971524 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.971599 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.971613 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.971633 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:07 crc kubenswrapper[4682]: I1210 10:46:07.971646 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:07Z","lastTransitionTime":"2025-12-10T10:46:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.017384 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.029361 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.035804 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92e
daf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.049459 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.063175 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.074195 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.074313 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.074747 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.074760 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.074777 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.074788 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.089508 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.105064 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.120347 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.139141 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.165934 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"-config-operator/machine-config-daemon-58skk\\\\nF1210 10:45:53.361609 6146 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z]\\\\nI1210 10:45:53.361614 6146 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1210 10:45:53.361620 6146 lb_config.go:1031] Cluster endpoints for openshift-route-controller-manager/route-controller-manager for network=default are: map[]\\\\nI1210 10:45:53.361622 6146 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-58skk\\\\nI1210 10:45:53.361614 6146 services_control\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.177177 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.177275 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.177295 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.177353 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.177374 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.179153 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.193814 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.204771 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.217415 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.241589 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.256668 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.272854 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.279914 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.279976 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.279995 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.280041 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.280064 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.285664 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:08Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.380794 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:08 crc kubenswrapper[4682]: E1210 10:46:08.380999 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.382997 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.383077 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.383093 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.383116 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.383132 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.491093 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.491183 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.491197 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.491217 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.491229 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.594052 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.594106 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.594119 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.594150 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.594168 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.697090 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.697168 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.697184 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.697210 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.697227 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.800272 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.800325 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.800337 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.800371 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.800383 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.902933 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.902996 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.903018 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.903043 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:08 crc kubenswrapper[4682]: I1210 10:46:08.903062 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:08Z","lastTransitionTime":"2025-12-10T10:46:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.006073 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.006125 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.006142 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.006162 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.006179 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.108841 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.109118 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.109164 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.109189 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.109209 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.211996 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.212069 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.212093 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.212123 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.212146 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.315004 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.315099 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.315125 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.315163 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.315183 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.380861 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.380948 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.380864 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:09 crc kubenswrapper[4682]: E1210 10:46:09.381044 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:09 crc kubenswrapper[4682]: E1210 10:46:09.381198 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:09 crc kubenswrapper[4682]: E1210 10:46:09.381265 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.420015 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.420095 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.420116 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.420148 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.420169 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.522961 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.523040 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.523055 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.523071 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.523084 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.626233 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.626308 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.626330 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.626359 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.626381 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.728295 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.728347 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.728359 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.728376 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.728387 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.831274 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.831329 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.831371 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.831393 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.831408 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.934669 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.934731 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.934764 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.934788 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:09 crc kubenswrapper[4682]: I1210 10:46:09.934804 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:09Z","lastTransitionTime":"2025-12-10T10:46:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.037316 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.037380 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.037397 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.037419 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.037433 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.140862 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.140939 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.140964 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.140998 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.141021 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.243874 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.243913 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.243927 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.243947 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.243958 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.347072 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.347137 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.347150 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.347168 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.347180 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.380878 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:10 crc kubenswrapper[4682]: E1210 10:46:10.381001 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.403837 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.418461 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eac5f386-8ee7-453f-9d17-fb57881c7d0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86054212a009f28d22b4dc4f9181fbea05c535d929160f8c05e8d649745c2bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://857d51665f1636c85cef233d00747420b56fcda68ed66d9f4628304e7868242c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://008323d5ab9db5bef027d390691379aad0773741eec1a3d48a7a6a9d23d9fe0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.431728 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.444094 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.450624 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.450669 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.450685 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.450706 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.450722 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.454647 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.469875 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.483669 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.496496 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.510883 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.528197 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"-config-operator/machine-config-daemon-58skk\\\\nF1210 10:45:53.361609 6146 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z]\\\\nI1210 10:45:53.361614 6146 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1210 10:45:53.361620 6146 lb_config.go:1031] Cluster endpoints for openshift-route-controller-manager/route-controller-manager for network=default are: map[]\\\\nI1210 10:45:53.361622 6146 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-58skk\\\\nI1210 10:45:53.361614 6146 services_control\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.540332 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\
\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.553073 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.553103 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.553113 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.553126 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.553135 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.557734 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.568889 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.579347 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.596317 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.607090 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.620547 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.632921 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:10Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.655756 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.655833 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.655848 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.655869 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.655885 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.759061 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.759117 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.759134 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.759157 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.759174 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.862782 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.862847 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.862860 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.862881 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.862920 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.966205 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.966265 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.966283 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.966307 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:10 crc kubenswrapper[4682]: I1210 10:46:10.966325 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:10Z","lastTransitionTime":"2025-12-10T10:46:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.069008 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.069070 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.069088 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.069119 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.069136 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.172358 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.172410 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.172424 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.172447 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.172463 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.249646 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.249901 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:46:43.249864248 +0000 UTC m=+83.570075008 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.250054 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.250219 4682 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.250290 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:43.250276501 +0000 UTC m=+83.570487261 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.276051 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.276112 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.276123 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.276140 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.276151 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.351538 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.351617 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.351654 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.351802 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.351825 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.351841 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.351851 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.351885 4682 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.351948 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:43.351928644 +0000 UTC m=+83.672139394 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.351862 4682 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.351850 4682 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.352121 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:43.352083869 +0000 UTC m=+83.672294739 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.352157 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:43.35214011 +0000 UTC m=+83.672351070 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.380124 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.380229 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.380141 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.380311 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.380140 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.380414 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.380431 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.380458 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.380508 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.380561 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.380685 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.452802 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs\") pod \"network-metrics-daemon-6c5qg\" (UID: \"f308e36d-4856-4306-adec-390e40daaee3\") " pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.452979 4682 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:46:11 crc kubenswrapper[4682]: E1210 10:46:11.453083 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs podName:f308e36d-4856-4306-adec-390e40daaee3 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:27.45305475 +0000 UTC m=+67.773265540 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs") pod "network-metrics-daemon-6c5qg" (UID: "f308e36d-4856-4306-adec-390e40daaee3") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.483266 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.483336 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.483357 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.483386 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.483409 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.586532 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.586585 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.586596 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.586617 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.586631 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.689789 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.689847 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.689857 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.689877 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.689889 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.792976 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.793026 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.793038 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.793055 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.793067 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.895602 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.895639 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.895650 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.895666 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.895676 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.997720 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.997805 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.997844 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.997876 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:11 crc kubenswrapper[4682]: I1210 10:46:11.997901 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:11Z","lastTransitionTime":"2025-12-10T10:46:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.100867 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.100957 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.101004 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.101031 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.101050 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.204031 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.204117 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.204155 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.204188 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.204212 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.306998 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.307064 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.307083 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.307109 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.307128 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.380741 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:12 crc kubenswrapper[4682]: E1210 10:46:12.381231 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.409634 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.409680 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.409689 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.409704 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.409714 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.512952 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.513028 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.513054 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.513086 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.513115 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.615684 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.615964 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.616057 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.616158 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.616235 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.718184 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.718241 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.718252 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.718267 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.718280 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.821035 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.821077 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.821093 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.821114 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.821131 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.923753 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.923791 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.923802 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.923816 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:12 crc kubenswrapper[4682]: I1210 10:46:12.923825 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:12Z","lastTransitionTime":"2025-12-10T10:46:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.026762 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.026824 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.026841 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.026864 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.026880 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.130094 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.130151 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.130167 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.130187 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.130202 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.232438 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.232499 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.232509 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.232525 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.232540 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.335413 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.335458 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.335488 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.335510 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.335522 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.379947 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.379993 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:13 crc kubenswrapper[4682]: E1210 10:46:13.380147 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.380221 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:13 crc kubenswrapper[4682]: E1210 10:46:13.380405 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:13 crc kubenswrapper[4682]: E1210 10:46:13.381095 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.381561 4682 scope.go:117] "RemoveContainer" containerID="36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.437521 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.437962 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.437972 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.437990 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.438003 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.540399 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.540437 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.540454 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.540512 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.540532 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.643354 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.643423 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.643436 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.643456 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.643490 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.739888 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/1.log" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.743194 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerStarted","Data":"09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6"} Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.743744 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.745253 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.745300 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.745317 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.745332 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.745345 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.764929 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.777632 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.797979 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.816851 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.828906 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.840937 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.847293 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.847333 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.847341 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.847354 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.847364 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.853459 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.864140 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.878110 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.892160 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eac5f386-8ee7-453f-9d17-fb57881c7d0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86054212a009f28d22b4dc4f9181fbea05c535d929160f8c05e8d649745c2bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://857d51665f1636c85cef233d00747420b56fcda68ed66d9f4628304e7868242c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://008323d5ab9db5bef027d390691379aad0773741eec1a3d48a7a6a9d23d9fe0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.907596 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.920051 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.940975 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.949677 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.949705 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.949715 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.949728 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.949736 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:13Z","lastTransitionTime":"2025-12-10T10:46:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.964070 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"-config-operator/machine-config-daemon-58skk\\\\nF1210 10:45:53.361609 6146 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z]\\\\nI1210 10:45:53.361614 6146 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1210 10:45:53.361620 6146 lb_config.go:1031] Cluster endpoints for openshift-route-controller-manager/route-controller-manager for network=default are: map[]\\\\nI1210 10:45:53.361622 6146 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-58skk\\\\nI1210 10:45:53.361614 6146 
services_control\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses
\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:13 crc kubenswrapper[4682]: I1210 10:46:13.981314 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.000324 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:13Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.012058 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.022801 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.052675 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.052743 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.052754 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.052769 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.052778 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.155089 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.155131 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.155142 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.155157 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.155169 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.257749 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.257809 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.257847 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.257873 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.257890 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.360802 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.360861 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.360873 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.360889 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.360902 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.380679 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:14 crc kubenswrapper[4682]: E1210 10:46:14.380812 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.463732 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.463780 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.463794 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.463815 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.463826 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.566702 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.566736 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.566748 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.566765 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.566777 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.668935 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.668975 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.668985 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.669001 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.669012 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.746869 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/2.log" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.747689 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/1.log" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.750364 4682 generic.go:334] "Generic (PLEG): container finished" podID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerID="09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6" exitCode=1 Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.750404 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerDied","Data":"09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6"} Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.750444 4682 scope.go:117] "RemoveContainer" containerID="36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.751406 4682 scope.go:117] "RemoveContainer" containerID="09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6" Dec 10 10:46:14 crc kubenswrapper[4682]: E1210 10:46:14.751778 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.771706 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.771800 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.771955 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.771968 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.771984 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.771996 4682 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.787136 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.800072 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.818859 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.831048 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.847781 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.859046 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.873818 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.874918 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.874946 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.874955 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.874969 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.874980 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.887278 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eac5f386-8ee7-453f-9d17-fb57881c7d0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86054212a009f28d22b4dc4f9181fbea05c535d929160f8c05e8d649745c2bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://857d51665f1636c85cef233d00747420b56fcda68ed66d9f4628304e7868242c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://008323d5ab9db5bef027d390691379aad0773741eec1a3d48a7a6a9d23d9fe0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.902201 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.914996 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.928034 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.941665 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.954739 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.968105 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.977568 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.977618 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.977634 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.977654 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.977669 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:14Z","lastTransitionTime":"2025-12-10T10:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:14 crc kubenswrapper[4682]: I1210 10:46:14.982451 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:14Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.005090 4682 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://36f15f6d81a98de5070e00caa4bca61fd85dfa4fabe55a0cb381348986f90ea8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"message\\\":\\\"-config-operator/machine-config-daemon-58skk\\\\nF1210 10:45:53.361609 6146 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:45:53Z is after 2025-08-24T17:21:41Z]\\\\nI1210 10:45:53.361614 6146 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1210 10:45:53.361620 6146 lb_config.go:1031] Cluster endpoints for openshift-route-controller-manager/route-controller-manager for network=default are: map[]\\\\nI1210 10:45:53.361622 6146 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-58skk\\\\nI1210 10:45:53.361614 6146 services_control\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:14Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI1210 10:46:14.207093 6383 reflector.go:311] 
Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207131 6383 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207219 6383 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207510 6383 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207723 6383 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:14.207848 6383 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207856 6383 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207890 6383 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.208334 6383 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256
:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.016803 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.080327 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.080373 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.080382 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.080416 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.080432 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.183394 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.183463 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.183528 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.183562 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.183586 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.286343 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.286385 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.286397 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.286416 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.286428 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.381019 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.381048 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:15 crc kubenswrapper[4682]: E1210 10:46:15.381190 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.381048 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:15 crc kubenswrapper[4682]: E1210 10:46:15.381327 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:15 crc kubenswrapper[4682]: E1210 10:46:15.381453 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.388563 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.388591 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.388600 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.388613 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.388621 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.490882 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.490931 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.490943 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.490961 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.490976 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.593341 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.593372 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.593380 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.593393 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.593402 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.696118 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.696168 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.696179 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.696194 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.696207 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.754813 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/2.log" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.759193 4682 scope.go:117] "RemoveContainer" containerID="09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6" Dec 10 10:46:15 crc kubenswrapper[4682]: E1210 10:46:15.759363 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.769119 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 
2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.780809 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.795427 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.798743 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.798846 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.798866 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.798889 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.798906 4682 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.809925 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.826671 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.840778 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.866549 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c
05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.882289 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eac5f386-8ee7-453f-9d17-fb57881c7d0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86054212a009f28d22b4dc4f9181fbea05c535d929160f8c05e8d649745c2bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://857d51665f1636c85cef233d00747420b56fcda68ed66d9f4628304e7868242c\\\",\\\"image\\\":\\\"quay.io/openshift-
release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://008323d5ab9db5bef027d390691379aad0773741eec1a3d48a7a6a9d23d9fe0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.898649 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.907603 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.907682 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.907698 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.907721 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.907738 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:15Z","lastTransitionTime":"2025-12-10T10:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.918732 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.931759 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.945826 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.961621 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.974126 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.986755 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:15 crc kubenswrapper[4682]: I1210 10:46:15.999542 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:46:15Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.010038 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.010094 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.010112 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.010133 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.010148 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.015831 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.015859 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.015870 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.015885 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.015896 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.019867 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:14Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI1210 10:46:14.207093 6383 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207131 6383 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207219 6383 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207510 6383 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207723 6383 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:14.207848 6383 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207856 6383 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207890 6383 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.208334 6383 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4682]: E1210 10:46:16.028938 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 
2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.031668 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.032300 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.032335 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.032348 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.032367 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.032379 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4682]: E1210 10:46:16.048023 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 
2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.052020 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.052048 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.052057 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.052074 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.052084 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4682]: E1210 10:46:16.064039 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 
2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.067853 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.067898 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.067908 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.067925 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.067935 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4682]: E1210 10:46:16.078091 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 
2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.081094 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.081127 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.081136 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.081151 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.081162 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4682]: E1210 10:46:16.091776 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:16Z is after 
2025-08-24T17:21:41Z" Dec 10 10:46:16 crc kubenswrapper[4682]: E1210 10:46:16.091882 4682 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.112270 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.112317 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.112334 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.112356 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.112372 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.216096 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.216163 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.216182 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.216207 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.216225 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.318941 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.318990 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.319002 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.319021 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.319034 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.381069 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:16 crc kubenswrapper[4682]: E1210 10:46:16.381245 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.421774 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.421844 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.421862 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.421882 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.421901 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.524610 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.524673 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.524691 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.524720 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.524744 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.627293 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.627332 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.627343 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.627361 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.627372 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.730809 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.730870 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.730882 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.730902 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.730919 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.834506 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.834562 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.834586 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.834611 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.834629 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.937852 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.937912 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.937950 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.937982 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:16 crc kubenswrapper[4682]: I1210 10:46:16.938005 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:16Z","lastTransitionTime":"2025-12-10T10:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.040777 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.040810 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.040822 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.040835 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.040844 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.143799 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.143861 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.143898 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.143933 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.143956 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.247009 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.247046 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.247055 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.247068 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.247077 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.350446 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.350517 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.350540 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.350558 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.350569 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.380379 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.380428 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:17 crc kubenswrapper[4682]: E1210 10:46:17.380673 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.380727 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:17 crc kubenswrapper[4682]: E1210 10:46:17.380916 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:17 crc kubenswrapper[4682]: E1210 10:46:17.381021 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.453560 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.453623 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.453642 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.453667 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.453685 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.556214 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.556261 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.556273 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.556288 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.556299 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.659312 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.659375 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.659392 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.659415 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.659435 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.762273 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.762350 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.762372 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.762399 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.762423 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.866441 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.866535 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.866553 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.866577 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.866595 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.969350 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.969438 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.969452 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.969490 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:17 crc kubenswrapper[4682]: I1210 10:46:17.969504 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:17Z","lastTransitionTime":"2025-12-10T10:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.072667 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.072716 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.072727 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.072748 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.072760 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.175884 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.175933 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.175950 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.175976 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.175994 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.279394 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.279458 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.279520 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.279552 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.279572 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.380710 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:18 crc kubenswrapper[4682]: E1210 10:46:18.380902 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.382819 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.382871 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.382889 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.382914 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.382932 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.486384 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.486459 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.486524 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.486557 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.486580 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.589083 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.589131 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.589142 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.589159 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.589172 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.691821 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.691873 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.691885 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.691905 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.691918 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.794651 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.794711 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.794728 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.794752 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.794768 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.898180 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.898272 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.898311 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.898344 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:18 crc kubenswrapper[4682]: I1210 10:46:18.898364 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:18Z","lastTransitionTime":"2025-12-10T10:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.000920 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.000974 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.000986 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.001003 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.001015 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.104685 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.105112 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.105124 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.105163 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.105174 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.206990 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.207050 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.207065 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.207088 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.207106 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.311381 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.311462 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.311514 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.311540 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.311570 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.381083 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.381151 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:19 crc kubenswrapper[4682]: E1210 10:46:19.381328 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.381348 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:19 crc kubenswrapper[4682]: E1210 10:46:19.381521 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:19 crc kubenswrapper[4682]: E1210 10:46:19.381646 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.415108 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.415174 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.415190 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.415218 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.415238 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.518206 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.518261 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.518273 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.518291 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.518308 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.621883 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.621922 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.621931 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.621944 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.621955 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.725005 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.725069 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.725087 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.725106 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.725117 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.827742 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.827772 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.827780 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.827793 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.827802 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.930175 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.930231 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.930242 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.930258 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:19 crc kubenswrapper[4682]: I1210 10:46:19.930269 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:19Z","lastTransitionTime":"2025-12-10T10:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.033006 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.033061 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.033075 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.033093 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.033107 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.136894 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.136946 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.136962 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.136984 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.137000 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.239573 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.239645 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.239662 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.239684 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.239700 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.341368 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.341403 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.341414 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.341431 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.341442 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.380233 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:20 crc kubenswrapper[4682]: E1210 10:46:20.380330 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.410122 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"
startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa
17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.431904 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.444144 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.444196 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.444208 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.444226 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.444247 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.449972 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.465636 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.484273 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.497414 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eac5f386-8ee7-453f-9d17-fb57881c7d0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86054212a009f28d22b4dc4f9181fbea05c535d929160f8c05e8d649745c2bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://857d51665f1636c85cef233d00747420b56fcda68ed66d9f4628304e7868242c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://008323d5ab9db5bef027d390691379aad0773741eec1a3d48a7a6a9d23d9fe0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.512913 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.529124 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.542243 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.550565 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.550618 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.550632 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.550651 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.550664 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.563292 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.578419 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.597413 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.615453 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.636905 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.652997 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.653041 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.653205 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.653240 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.653256 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.674021 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950
eeec4c0250da6b1c64194df6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:14Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI1210 10:46:14.207093 6383 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207131 6383 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207219 6383 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207510 6383 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207723 6383 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:14.207848 6383 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207856 6383 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207890 6383 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.208334 6383 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.688955 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.703401 4682 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.720659 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:20Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.756128 4682 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.756182 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.756198 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.756222 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.756239 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.859086 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.859159 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.859186 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.859218 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.859241 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.962927 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.962995 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.963015 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.963045 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:20 crc kubenswrapper[4682]: I1210 10:46:20.963065 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:20Z","lastTransitionTime":"2025-12-10T10:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.066446 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.066541 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.066559 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.066583 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.066600 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:21Z","lastTransitionTime":"2025-12-10T10:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.169453 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.169526 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.169537 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.169554 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.169566 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:21Z","lastTransitionTime":"2025-12-10T10:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.272358 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.272432 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.272454 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.272521 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.272546 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:21Z","lastTransitionTime":"2025-12-10T10:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.375530 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.375578 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.375595 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.375614 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.375628 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:21Z","lastTransitionTime":"2025-12-10T10:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.380235 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.380304 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:21 crc kubenswrapper[4682]: E1210 10:46:21.380412 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.380240 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:21 crc kubenswrapper[4682]: E1210 10:46:21.380695 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:21 crc kubenswrapper[4682]: E1210 10:46:21.380754 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.478031 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.478093 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.478110 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.478132 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.478147 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:21Z","lastTransitionTime":"2025-12-10T10:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.581366 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.581411 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.581422 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.581443 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.581457 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:21Z","lastTransitionTime":"2025-12-10T10:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.684307 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.684357 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.684366 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.684507 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.684531 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:21Z","lastTransitionTime":"2025-12-10T10:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.787081 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.787121 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.787131 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.787145 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.787160 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:21Z","lastTransitionTime":"2025-12-10T10:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.889901 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.889944 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.889953 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.889967 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.889977 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:21Z","lastTransitionTime":"2025-12-10T10:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.993400 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.993529 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.993550 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.993575 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:21 crc kubenswrapper[4682]: I1210 10:46:21.993592 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:21Z","lastTransitionTime":"2025-12-10T10:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.097363 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.097432 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.097452 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.097504 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.097524 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.201008 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.201079 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.201101 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.201129 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.201150 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
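
Note on the ovnkube-controller entry earlier in this log: it sits in CrashLoopBackOff with "back-off 20s restarting failed container" at restartCount 2, which matches an exponential restart back-off. A rough sketch of that progression, assuming the kubelet's usual 10s initial delay doubling to a 5-minute cap (defaults, not values stated in this log):

# Assumed kubelet defaults: 10s initial delay, doubling, capped at 300s.
delay, cap = 10, 300
for restart in range(1, 8):
    print(f"after crash #{restart}: back-off {delay}s")  # crash #2 -> 20s, as seen above
    delay = min(delay * 2, cap)
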
Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.303909 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.303969 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.303991 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.304022 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.304047 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.380111 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:22 crc kubenswrapper[4682]: E1210 10:46:22.380257 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.407098 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.407140 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.407152 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.407168 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.407180 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.509774 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.509817 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.509827 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.509840 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.509851 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.612400 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.612459 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.612493 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.612514 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.612526 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.715762 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.715830 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.715849 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.715873 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.715889 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.818380 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.818537 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.818577 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.818613 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.818650 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.921558 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.921615 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.921634 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.921661 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:22 crc kubenswrapper[4682]: I1210 10:46:22.921679 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:22Z","lastTransitionTime":"2025-12-10T10:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.024814 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.024852 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.024864 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.024881 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.024892 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.127772 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.127826 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.127840 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.127858 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.127870 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.231005 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.231045 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.231054 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.231068 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.231078 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.333290 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.333324 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.333336 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.333354 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.333365 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.380447 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:23 crc kubenswrapper[4682]: E1210 10:46:23.380659 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.380723 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.380728 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:23 crc kubenswrapper[4682]: E1210 10:46:23.380877 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:23 crc kubenswrapper[4682]: E1210 10:46:23.380996 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.435734 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.435790 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.435809 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.435835 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.435854 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.538905 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.538967 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.538991 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.539023 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.539046 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.642556 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.642616 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.642634 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.642659 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.642683 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.745708 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.745759 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.745776 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.745799 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.745820 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.848677 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.848731 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.848745 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.848768 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.848783 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.951339 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.951378 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.951390 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.951409 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:23 crc kubenswrapper[4682]: I1210 10:46:23.951421 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:23Z","lastTransitionTime":"2025-12-10T10:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.055636 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.055704 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.055716 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.055733 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.055746 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.158259 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.158301 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.158312 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.158333 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.158346 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.260950 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.260994 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.261011 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.261032 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.261047 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.363321 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.363356 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.363365 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.363379 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.363388 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.380626 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:24 crc kubenswrapper[4682]: E1210 10:46:24.380880 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.466408 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.466500 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.466527 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.466559 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.466581 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.568683 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.568725 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.568736 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.568754 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.568770 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.671562 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.671617 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.671638 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.671659 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.671676 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.774048 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.774091 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.774099 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.774114 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.774123 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.876713 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.876757 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.876770 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.876787 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.876799 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.979099 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.979130 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.979140 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.979152 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:24 crc kubenswrapper[4682]: I1210 10:46:24.979161 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:24Z","lastTransitionTime":"2025-12-10T10:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.082596 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.082665 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.082676 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.082696 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.082708 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.185362 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.185430 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.185442 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.185513 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.185532 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.288008 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.288056 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.288069 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.288086 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.288099 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.380553 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.380577 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.380558 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:25 crc kubenswrapper[4682]: E1210 10:46:25.380694 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:25 crc kubenswrapper[4682]: E1210 10:46:25.380978 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:25 crc kubenswrapper[4682]: E1210 10:46:25.381049 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.389930 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.389967 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.389979 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.389994 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.390004 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.493180 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.493238 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.493251 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.493272 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.493285 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.596619 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.596685 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.596705 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.596727 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.596743 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.700933 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.700980 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.700995 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.701015 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.701025 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.802880 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.802915 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.802924 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.802953 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.802968 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.906134 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.906198 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.906215 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.906240 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:25 crc kubenswrapper[4682]: I1210 10:46:25.906262 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:25Z","lastTransitionTime":"2025-12-10T10:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.009803 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.009847 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.009858 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.009876 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.009891 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.113362 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.113423 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.113438 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.113459 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.113497 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.215648 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.215717 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.215729 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.215747 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.215758 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.221002 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.221040 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.221050 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.221066 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.221078 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: E1210 10:46:26.233246 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.237238 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.237285 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.237298 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.237322 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.237337 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: E1210 10:46:26.250105 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.253750 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.253809 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.253824 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.253848 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.253861 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: E1210 10:46:26.265856 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.269671 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.269738 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.269752 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.269772 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.269784 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: E1210 10:46:26.282883 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.287836 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.287876 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.287886 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.287901 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.287912 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: E1210 10:46:26.301846 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:26Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:26 crc kubenswrapper[4682]: E1210 10:46:26.301980 4682 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.318365 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.318879 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.318907 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.318927 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.318945 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.380505 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:26 crc kubenswrapper[4682]: E1210 10:46:26.380672 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.421141 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.421222 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.421232 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.421250 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.421263 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.525030 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.525087 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.525113 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.525131 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.525142 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.627948 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.627993 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.628004 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.628019 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.628031 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.731074 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.731106 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.731116 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.731130 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.731140 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.834079 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.834156 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.834184 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.834202 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.834214 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.939933 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.939994 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.940003 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.940017 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:26 crc kubenswrapper[4682]: I1210 10:46:26.940047 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:26Z","lastTransitionTime":"2025-12-10T10:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.043096 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.043149 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.043162 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.043180 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.043190 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.145781 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.145827 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.145839 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.145854 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.145866 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.247654 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.247681 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.247690 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.247703 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.247713 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.350121 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.350207 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.350221 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.350248 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.350263 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.380657 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.380704 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:27 crc kubenswrapper[4682]: E1210 10:46:27.380807 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.380861 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:27 crc kubenswrapper[4682]: E1210 10:46:27.380895 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:27 crc kubenswrapper[4682]: E1210 10:46:27.381082 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.452952 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.452984 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.452994 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.453009 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.453018 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.526699 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs\") pod \"network-metrics-daemon-6c5qg\" (UID: \"f308e36d-4856-4306-adec-390e40daaee3\") " pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:27 crc kubenswrapper[4682]: E1210 10:46:27.526967 4682 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:46:27 crc kubenswrapper[4682]: E1210 10:46:27.527141 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs podName:f308e36d-4856-4306-adec-390e40daaee3 nodeName:}" failed. No retries permitted until 2025-12-10 10:46:59.527109177 +0000 UTC m=+99.847319927 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs") pod "network-metrics-daemon-6c5qg" (UID: "f308e36d-4856-4306-adec-390e40daaee3") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.554640 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.554679 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.554687 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.554701 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.554711 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.657384 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.657431 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.657440 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.657455 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.657464 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.760022 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.760080 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.760098 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.760122 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.760137 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.863636 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.863678 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.863694 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.863711 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.863723 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.966374 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.966418 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.966429 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.966446 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:27 crc kubenswrapper[4682]: I1210 10:46:27.966485 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:27Z","lastTransitionTime":"2025-12-10T10:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.068713 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.068787 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.068807 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.068836 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.068854 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.171366 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.171529 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.171559 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.171589 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.171612 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.274261 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.274306 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.274317 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.274335 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.274346 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.376682 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.376726 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.376737 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.376753 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.376765 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.380022 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:28 crc kubenswrapper[4682]: E1210 10:46:28.380169 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.479790 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.479861 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.479887 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.479912 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.479930 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.583563 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.583641 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.583659 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.583769 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.583804 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.686228 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.686289 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.686308 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.686330 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.686346 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.788980 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.789042 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.789059 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.789082 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.789098 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.891760 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.891800 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.891810 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.891826 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.891837 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.994486 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.994714 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.994728 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.994744 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:28 crc kubenswrapper[4682]: I1210 10:46:28.994754 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:28Z","lastTransitionTime":"2025-12-10T10:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.097206 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.097287 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.097298 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.097314 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.097325 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.203837 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.203892 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.203909 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.203931 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.203948 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.305912 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.305939 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.305948 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.305961 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.305971 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.380804 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.380838 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:29 crc kubenswrapper[4682]: E1210 10:46:29.380971 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.381047 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:29 crc kubenswrapper[4682]: E1210 10:46:29.381154 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:29 crc kubenswrapper[4682]: E1210 10:46:29.381199 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.381794 4682 scope.go:117] "RemoveContainer" containerID="09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6" Dec 10 10:46:29 crc kubenswrapper[4682]: E1210 10:46:29.381918 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.408766 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.408808 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.408828 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.408849 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.408866 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.511716 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.511779 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.511790 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.511806 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.511818 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.615447 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.615567 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.615593 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.615651 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.615671 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.720253 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.720296 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.720305 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.720321 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.720355 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.822447 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.822499 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.822514 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.822529 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.822538 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.925255 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.925654 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.925665 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.925680 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:29 crc kubenswrapper[4682]: I1210 10:46:29.925691 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:29Z","lastTransitionTime":"2025-12-10T10:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.028722 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.028790 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.028803 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.028819 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.028830 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.131063 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.131264 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.131434 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.131603 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.131737 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.234108 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.234147 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.234158 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.234173 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.234183 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.337250 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.337289 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.337297 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.337309 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.337317 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.380848 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:30 crc kubenswrapper[4682]: E1210 10:46:30.381035 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.396778 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.408049 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eac5f386-8ee7-453f-9d17-fb57881c7d0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86054212a009f28d22b4dc4f9181fbea05c535d929160f8c05e8d649745c2bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://857d51665f1636c85cef233d00747420b56fcda68ed66d9f4628304e7868242c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://008323d5ab9db5bef027d390691379aad0773741eec1a3d48a7a6a9d23d9fe0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.418330 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.430920 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.439716 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.439751 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.439760 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.439774 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.439783 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.442112 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.456995 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.473665 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.491126 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.512988 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.531499 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:14Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI1210 10:46:14.207093 6383 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207131 6383 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207219 6383 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207510 6383 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207723 6383 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:14.207848 6383 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207856 6383 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207890 6383 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.208334 6383 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.542075 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.542128 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.542151 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.542173 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.542191 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.546724 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.566169 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.577609 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.589583 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.612149 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.631597 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.644929 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.645005 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.645049 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.645076 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.645097 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.651866 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.669702 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.747135 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.747180 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.747189 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.747202 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.747211 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.811867 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zs6ss_a005c959-3805-4e15-aa3a-7093815e03b8/kube-multus/0.log" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.811953 4682 generic.go:334] "Generic (PLEG): container finished" podID="a005c959-3805-4e15-aa3a-7093815e03b8" containerID="18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873" exitCode=1 Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.812001 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zs6ss" event={"ID":"a005c959-3805-4e15-aa3a-7093815e03b8","Type":"ContainerDied","Data":"18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873"} Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.812558 4682 scope.go:117] "RemoveContainer" containerID="18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.835637 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.850515 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.850559 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.850572 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.850592 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.850604 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.851824 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.861910 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.874830 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.888058 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eac5f386-8ee7-453f-9d17-fb57881c7d0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86054212a009f28d22b4dc4f9181fbea05c535d929160f8c05e8d649745c2bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://857d51665f1636c85cef233d00747420b56fcda68ed66d9f4628304e7868242c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://008323d5ab9db5bef027d390691379aad0773741eec1a3d48a7a6a9d23d9fe0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.901465 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.915560 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:30Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:29Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1e6b9a20-c1ad-4e44-a7e0-1640aa03e8ee\\\\n2025-12-10T10:45:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1e6b9a20-c1ad-4e44-a7e0-1640aa03e8ee to /host/opt/cni/bin/\\\\n2025-12-10T10:45:44Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:44Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.929206 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded2
2c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.945814 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:14Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI1210 10:46:14.207093 6383 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207131 6383 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207219 6383 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207510 6383 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207723 6383 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:14.207848 6383 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207856 6383 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207890 6383 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.208334 6383 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.952217 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.952252 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.952263 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.952276 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.952284 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:30Z","lastTransitionTime":"2025-12-10T10:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.957054 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.967377 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.976401 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.989117 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df1
03593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:30 crc kubenswrapper[4682]: I1210 10:46:30.998435 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:30Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.012254 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.021523 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.042981 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c
05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.054654 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.054692 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.054702 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.054734 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.054745 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.055257 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.157065 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.157103 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.157115 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.157129 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.157141 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.259396 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.259505 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.259520 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.259538 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.259551 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.361231 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.361265 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.361274 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.361291 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.361310 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.380212 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:31 crc kubenswrapper[4682]: E1210 10:46:31.380319 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.380348 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.380413 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:31 crc kubenswrapper[4682]: E1210 10:46:31.380501 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:31 crc kubenswrapper[4682]: E1210 10:46:31.380718 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.464541 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.464584 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.464595 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.464611 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.464621 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.567729 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.567773 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.567785 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.567800 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.567812 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.671311 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.671368 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.671380 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.671402 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.671416 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.774047 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.774110 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.774135 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.774167 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.774190 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.817393 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zs6ss_a005c959-3805-4e15-aa3a-7093815e03b8/kube-multus/0.log" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.817521 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zs6ss" event={"ID":"a005c959-3805-4e15-aa3a-7093815e03b8","Type":"ContainerStarted","Data":"a7b979e9cc3b0e9077533cb434014c582b24756abb4f4b3a178ac7be985512fd"} Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.839681 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.859007 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.876240 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.877656 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.877708 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.877726 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.877750 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.877766 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.890499 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.904295 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.917179 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eac5f386-8ee7-453f-9d17-fb57881c7d0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86054212a009f28d22b4dc4f9181fbea05c535d929160f8c05e8d649745c2bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://857d51665f1636c85cef233d00747420b56fcda68ed66d9f4628304e7868242c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://008323d5ab9db5bef027d390691379aad0773741eec1a3d48a7a6a9d23d9fe0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.928591 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.941324 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.952027 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.965645 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.979874 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.979922 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.979936 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.979952 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.979966 4682 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:31Z","lastTransitionTime":"2025-12-10T10:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.981584 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:31 crc kubenswrapper[4682]: I1210 10:46:31.995194 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7b979e9cc3b0e9077533cb434014c582b24756abb4f4b3a178ac7be985512fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:29Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1e6b9a20-c1ad-4e44-a7e0-1640aa03e8ee\\\\n2025-12-10T10:45:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1e6b9a20-c1ad-4e44-a7e0-1640aa03e8ee to /host/opt/cni/bin/\\\\n2025-12-10T10:45:44Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:44Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:31Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.008987 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.026311 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:14Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI1210 10:46:14.207093 6383 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207131 6383 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207219 6383 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207510 6383 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207723 6383 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:14.207848 6383 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207856 6383 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207890 6383 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.208334 6383 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.038161 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.051980 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.062578 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.077900 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:32Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.082352 4682 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.082377 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.082385 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.082401 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.082413 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.185564 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.185603 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.185612 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.185628 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.185637 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.287861 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.287909 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.287920 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.287936 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.287945 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.380595 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:32 crc kubenswrapper[4682]: E1210 10:46:32.380737 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.389790 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.389812 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.389822 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.389835 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.389844 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.492207 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.492317 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.492336 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.492355 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.492370 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.594521 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.594554 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.594562 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.594577 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.594586 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.696960 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.697003 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.697012 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.697026 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.697035 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.799675 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.799718 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.799729 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.799744 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.799755 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.902377 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.902431 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.902442 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.902460 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:32 crc kubenswrapper[4682]: I1210 10:46:32.902498 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:32Z","lastTransitionTime":"2025-12-10T10:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.005054 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.005108 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.005122 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.005140 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.005157 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.107377 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.107416 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.107428 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.107446 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.107455 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.210380 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.210422 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.210433 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.210453 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.210483 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.312606 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.312640 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.312649 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.312662 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.312672 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.380933 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.380970 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.380989 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:33 crc kubenswrapper[4682]: E1210 10:46:33.381086 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:33 crc kubenswrapper[4682]: E1210 10:46:33.381147 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:33 crc kubenswrapper[4682]: E1210 10:46:33.381240 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.414970 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.415001 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.415010 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.415025 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.415035 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.517974 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.518047 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.518066 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.518099 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.518117 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.620506 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.620553 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.620566 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.620583 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.620598 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.722834 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.722871 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.722882 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.722898 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.722911 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.826784 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.826842 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.826854 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.826871 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.826890 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.930445 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.930883 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.931072 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.931219 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:33 crc kubenswrapper[4682]: I1210 10:46:33.931348 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:33Z","lastTransitionTime":"2025-12-10T10:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.034077 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.034132 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.034149 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.034171 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.034186 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.137744 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.137818 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.137837 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.137867 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.137890 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.259200 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.259274 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.259296 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.259326 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.259349 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.363372 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.363440 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.363458 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.363525 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.363557 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.380577 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:34 crc kubenswrapper[4682]: E1210 10:46:34.381015 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.391971 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.467007 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.467052 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.467065 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.467082 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.467097 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.570447 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.570539 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.570559 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.570588 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.570607 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.674376 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.674432 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.674452 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.674492 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.674505 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.777518 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.777575 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.777588 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.777609 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.777623 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.879971 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.880010 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.880021 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.880035 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.880044 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.982460 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.982532 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.982545 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.982564 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:34 crc kubenswrapper[4682]: I1210 10:46:34.982574 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:34Z","lastTransitionTime":"2025-12-10T10:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.084900 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.084943 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.084953 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.084970 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.084981 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.188092 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.188140 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.188151 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.188172 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.188185 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.290588 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.290630 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.290639 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.290654 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.290667 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.381009 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.381036 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.381214 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:35 crc kubenswrapper[4682]: E1210 10:46:35.381317 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:35 crc kubenswrapper[4682]: E1210 10:46:35.381498 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:35 crc kubenswrapper[4682]: E1210 10:46:35.381651 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.393743 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.393807 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.393820 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.393841 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.393854 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.496131 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.496167 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.496179 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.496196 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.496207 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.599064 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.599213 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.599244 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.599267 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.599279 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.701540 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.701602 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.701626 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.701654 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.701675 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.804059 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.804113 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.804130 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.804152 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.804171 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.907640 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.907715 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.907734 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.908236 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:35 crc kubenswrapper[4682]: I1210 10:46:35.908299 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:35Z","lastTransitionTime":"2025-12-10T10:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.011328 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.011358 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.011369 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.011384 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.011396 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.114992 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.115046 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.115057 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.115073 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.115085 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.218082 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.218159 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.218187 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.218215 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.218236 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.321439 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.321593 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.321723 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.321760 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.321782 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.380189 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:36 crc kubenswrapper[4682]: E1210 10:46:36.380346 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.424531 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.424601 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.424618 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.424642 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.424666 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.527460 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.527589 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.527609 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.527634 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.527652 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.559044 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.559111 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.559138 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.559164 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.559181 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4682]: E1210 10:46:36.580208 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.586509 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.586577 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.586597 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.586625 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.586647 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4682]: E1210 10:46:36.608693 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.614697 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.614770 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.614792 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.614818 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.614835 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4682]: E1210 10:46:36.637724 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.642938 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.643015 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.643040 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.643071 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.643094 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4682]: E1210 10:46:36.663827 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.669990 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.670058 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.670081 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.670113 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.670136 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4682]: E1210 10:46:36.693991 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:36Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:36 crc kubenswrapper[4682]: E1210 10:46:36.694231 4682 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.696589 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.696712 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.696731 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.696754 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.696771 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.800311 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.800368 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.800386 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.800408 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.800424 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.902893 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.902969 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.902994 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.903028 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:36 crc kubenswrapper[4682]: I1210 10:46:36.903053 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:36Z","lastTransitionTime":"2025-12-10T10:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.006177 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.006250 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.006275 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.006306 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.006329 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.108976 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.109040 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.109076 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.109125 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.109151 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.212678 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.212764 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.212787 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.212820 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.212842 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.316839 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.316936 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.316955 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.316984 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.317010 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.381182 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.381246 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.381304 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:37 crc kubenswrapper[4682]: E1210 10:46:37.381427 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:37 crc kubenswrapper[4682]: E1210 10:46:37.381566 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:37 crc kubenswrapper[4682]: E1210 10:46:37.381697 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.420510 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.420571 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.420589 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.420616 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.420639 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.523694 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.523772 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.523797 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.523835 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.523861 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.627551 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.627607 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.627621 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.627646 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.627660 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.731521 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.731571 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.731581 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.731605 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.731619 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.834515 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.834583 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.834595 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.834617 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.834632 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.937654 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.937722 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.937736 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.937758 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:37 crc kubenswrapper[4682]: I1210 10:46:37.937775 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:37Z","lastTransitionTime":"2025-12-10T10:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.041881 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.041955 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.041973 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.042003 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.042022 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.145875 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.145964 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.145990 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.146025 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.146048 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.249045 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.249120 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.249145 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.249170 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.249193 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.354417 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.354534 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.354556 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.354587 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.354614 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.380123 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:38 crc kubenswrapper[4682]: E1210 10:46:38.380370 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.458139 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.458614 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.458785 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.458913 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.458990 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.563109 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.563158 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.563171 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.563190 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.563203 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.666670 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.666714 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.666724 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.666743 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.666755 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.769822 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.769904 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.769919 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.769947 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.769962 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.872818 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.872883 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.872893 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.872913 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.872927 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.976519 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.976574 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.976583 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.976604 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:38 crc kubenswrapper[4682]: I1210 10:46:38.976620 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:38Z","lastTransitionTime":"2025-12-10T10:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.079392 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.079453 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.079486 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.079515 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.079530 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.182188 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.182214 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.182222 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.182236 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.182246 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.284822 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.284892 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.284903 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.285098 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.285144 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.380216 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.380272 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:39 crc kubenswrapper[4682]: E1210 10:46:39.380367 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.380216 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:39 crc kubenswrapper[4682]: E1210 10:46:39.380514 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:39 crc kubenswrapper[4682]: E1210 10:46:39.380618 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.387940 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.387984 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.387997 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.388012 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.388024 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.490529 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.490571 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.490580 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.490596 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.490606 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.593509 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.593577 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.593864 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.593924 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.593941 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.697050 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.697086 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.697095 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.697109 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.697120 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.799505 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.799554 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.799573 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.799595 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.799610 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.902099 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.902428 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.902551 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.902664 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:39 crc kubenswrapper[4682]: I1210 10:46:39.902772 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:39Z","lastTransitionTime":"2025-12-10T10:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.005660 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.005711 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.005729 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.005750 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.005766 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.108357 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.108384 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.108393 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.108406 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.108414 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.211791 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.211862 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.211884 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.211922 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.211962 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.314673 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.314731 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.314750 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.314776 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.314794 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.380851 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:40 crc kubenswrapper[4682]: E1210 10:46:40.381031 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.406464 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"
startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa
17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.416953 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.417019 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.417030 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.417045 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.417056 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.436215 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.449456 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.465072 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.479400 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.494655 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eac5f386-8ee7-453f-9d17-fb57881c7d0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86054212a009f28d22b4dc4f9181fbea05c535d929160f8c05e8d649745c2bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://857d51665f1636c85cef233d00747420b56fcda68ed66d9f4628304e7868242c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://008323d5ab9db5bef027d390691379aad0773741eec1a3d48a7a6a9d23d9fe0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.511710 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.519809 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.519881 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.519899 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.519925 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.519946 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.527962 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.542644 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.555319 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf9794de-1368-4ff9-aa68-73aadaecc504\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c445f7ada23dc8166a355739343a03d78c43a2ac04e2bab918d667ef9c206629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d4df22bd4bb69e72e795eddc4b87532730c9c2c8d3bfaa6b4f41f5c6fe9676e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0d4df22bd4bb69e72e795eddc4b87532730c9c2c8d3bfaa6b4f41f5c6fe9676e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.569140 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.585746 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.605802 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7b979e9cc3b0e9077533cb434014c582b24756abb4f4b3a178ac7be985512fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:29Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1e6b9a20-c1ad-4e44-a7e0-1640aa03e8ee\\\\n2025-12-10T10:45:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1e6b9a20-c1ad-4e44-a7e0-1640aa03e8ee to /host/opt/cni/bin/\\\\n2025-12-10T10:45:44Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:44Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.623385 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.623529 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.623558 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.623595 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.623621 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.630116 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.663905 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:14Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI1210 10:46:14.207093 6383 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207131 6383 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207219 6383 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207510 6383 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207723 6383 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:14.207848 6383 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207856 6383 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207890 6383 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.208334 6383 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.683118 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.700366 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.712786 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.726987 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.727055 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.727078 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.727107 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.727124 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.730784 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:40Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.830197 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.830490 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.830566 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.830641 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.830710 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.934042 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.934100 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.934117 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.934141 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:40 crc kubenswrapper[4682]: I1210 10:46:40.934159 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:40Z","lastTransitionTime":"2025-12-10T10:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.036642 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.036676 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.036685 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.036698 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.036708 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.139382 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.139443 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.139463 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.139516 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.139539 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.242031 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.242081 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.242092 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.242110 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.242126 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.345050 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.345149 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.345167 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.345190 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.345210 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.380454 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.380589 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.380508 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:41 crc kubenswrapper[4682]: E1210 10:46:41.380664 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:41 crc kubenswrapper[4682]: E1210 10:46:41.380779 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:41 crc kubenswrapper[4682]: E1210 10:46:41.380964 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.452739 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.453020 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.453112 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.453242 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.453346 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.557246 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.557369 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.557418 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.557443 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.557459 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.661288 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.661363 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.661374 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.661393 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.661405 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.764668 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.764721 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.764733 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.764748 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.764759 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.867081 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.867125 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.867133 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.867147 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.867157 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.969767 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.969804 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.969815 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.969830 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:41 crc kubenswrapper[4682]: I1210 10:46:41.969840 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:41Z","lastTransitionTime":"2025-12-10T10:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.072726 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.072823 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.073118 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.073438 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.073519 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.177175 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.177214 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.177238 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.177259 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.177274 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.279692 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.280011 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.280111 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.280186 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.280252 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.380539 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:42 crc kubenswrapper[4682]: E1210 10:46:42.380769 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.383256 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.383305 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.383344 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.383358 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.383368 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.485839 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.485926 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.485942 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.485961 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.485974 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.588400 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.588441 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.588449 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.588462 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.588487 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.692189 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.692260 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.692277 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.692300 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.692318 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.795652 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.795724 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.795748 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.795780 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.795803 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.898186 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.898239 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.898249 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.898264 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:42 crc kubenswrapper[4682]: I1210 10:46:42.898273 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:42Z","lastTransitionTime":"2025-12-10T10:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.000936 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.000988 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.000999 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.001016 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.001028 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.104751 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.104805 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.104815 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.104833 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.104844 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.208257 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.208346 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.208372 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.208406 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.208431 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.293566 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.293713 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.293739 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-12-10 10:47:47.293707414 +0000 UTC m=+147.613918164 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.293837 4682 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.293946 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:47:47.293927661 +0000 UTC m=+147.614138411 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.311807 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.311857 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.311888 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.311906 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.311918 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.380657 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.380771 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.380780 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.380893 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.381099 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.381514 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.381841 4682 scope.go:117] "RemoveContainer" containerID="09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.394350 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.394403 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.394438 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.394649 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.394671 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.394683 
4682 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.394729 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 10:47:47.394714412 +0000 UTC m=+147.714925162 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.394869 4682 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.394924 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 10:47:47.394910958 +0000 UTC m=+147.715121728 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.394966 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.395009 4682 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.395020 4682 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:46:43 crc kubenswrapper[4682]: E1210 10:46:43.395077 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 10:47:47.395061603 +0000 UTC m=+147.715272343 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.415060 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.415113 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.415126 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.415145 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.415158 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.517565 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.517629 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.517639 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.517655 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.517710 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.620331 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.620388 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.620401 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.620429 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.620443 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.722839 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.722906 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.722934 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.722963 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.722986 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.825120 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.825158 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.825168 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.825182 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.825193 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.858392 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/2.log" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.929426 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.929501 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.929515 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.929535 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:43 crc kubenswrapper[4682]: I1210 10:46:43.929547 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:43Z","lastTransitionTime":"2025-12-10T10:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.032331 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.032396 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.032406 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.032431 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.032450 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.140738 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.140817 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.140832 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.140886 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.140909 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.244041 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.244112 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.244133 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.244160 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.244177 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.346762 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.346798 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.346812 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.346832 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.346843 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.380315 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:44 crc kubenswrapper[4682]: E1210 10:46:44.380456 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.449290 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.449343 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.449355 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.449373 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.449386 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.552100 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.552158 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.552171 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.552194 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.552212 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.656039 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.656095 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.656108 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.656128 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.656139 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.759311 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.759434 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.759449 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.759503 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.759540 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.862885 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.862938 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.862956 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.862980 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.862998 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.868228 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/3.log" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.869142 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/2.log" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.872446 4682 generic.go:334] "Generic (PLEG): container finished" podID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerID="6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9" exitCode=1 Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.872508 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerDied","Data":"6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9"} Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.872715 4682 scope.go:117] "RemoveContainer" containerID="09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.873753 4682 scope.go:117] "RemoveContainer" containerID="6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9" Dec 10 10:46:44 crc kubenswrapper[4682]: E1210 10:46:44.874000 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.907074 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6bd9d948edf86eb37a587e96fc486bcf3f6339af
bc8db0861428dc2f43f5a3c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:14Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI1210 10:46:14.207093 6383 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207131 6383 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207219 6383 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207510 6383 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207723 6383 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:14.207848 6383 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207856 6383 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207890 6383 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.208334 6383 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:44Z\\\",\\\"message\\\":\\\"er/olm-operator-metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"63b1440a-0908-4cab-8799-012fa1cf0b07\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-operator-lifecycle-manager/olm-operator-metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, 
Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.168\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1210 10:46:44.523716 6802 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/ru
n/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.924980 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:44Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.938588 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf9794de-1368-4ff9-aa68-73aadaecc504\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c445f7ada23dc8166a355739343a03d78c43a2ac04e2bab918d667ef9c206629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d4df22bd4bb69e72e795eddc4b87532730c9c2c8d3bfaa6b4f41f5c6fe9676e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0d4df22bd4bb69e72e795eddc4b87532730c9c2c8d3bfaa6b4f41f5c6fe9676e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.955610 4682 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.966188 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.966231 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.966243 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.966260 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.966270 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:44Z","lastTransitionTime":"2025-12-10T10:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.973572 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:44 crc kubenswrapper[4682]: I1210 10:46:44.989643 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7b979e9cc3b0e9077533cb434014c582b24756abb4f4b3a178ac7be985512fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:29Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1e6b9a20-c1ad-4e44-a7e0-1640aa03e8ee\\\\n2025-12-10T10:45:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1e6b9a20-c1ad-4e44-a7e0-1640aa03e8ee to /host/opt/cni/bin/\\\\n2025-12-10T10:45:44Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:44Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:44Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.002797 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.019922 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.033954 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.051429 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.068132 4682 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.068187 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.068202 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.068218 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.068232 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.078090 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\
"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state
\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.099415 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.117719 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.132058 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.150142 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.170059 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.170095 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.170106 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.170123 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.170136 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.171097 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eac5f386-8ee7-453f-9d17-fb57881c7d0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86054212a009f28d22b4dc4f9181fbea05c535d929160f8c05e8d649745c2bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://857d51665f1636c85cef233d00747420b56fcda68ed66d9f4628304e7868242c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://008323d5ab9db5bef027d390691379aad0773741eec1a3d48a7a6a9d23d9fe0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.183694 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.197928 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.209672 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:45Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.272214 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.272262 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.272278 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.272299 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.272317 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.375023 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.375085 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.375103 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.375134 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.375156 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.380680 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.380680 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.380797 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:45 crc kubenswrapper[4682]: E1210 10:46:45.381004 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:45 crc kubenswrapper[4682]: E1210 10:46:45.381107 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:45 crc kubenswrapper[4682]: E1210 10:46:45.381256 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.477882 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.477962 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.477987 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.478017 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.478040 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.580725 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.580823 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.580877 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.580906 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.580923 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.683690 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.683746 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.683765 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.683795 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.683819 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.786174 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.786233 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.786257 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.786285 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.786305 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.877219 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/3.log" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.888185 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.888210 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.888218 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.888229 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.888238 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.991225 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.991287 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.991305 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.991329 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:45 crc kubenswrapper[4682]: I1210 10:46:45.991346 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:45Z","lastTransitionTime":"2025-12-10T10:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.093915 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.093985 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.094007 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.094037 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.094062 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.197423 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.197488 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.197498 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.197513 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.197542 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.301510 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.301585 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.301609 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.301639 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.301661 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.380648 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:46 crc kubenswrapper[4682]: E1210 10:46:46.380823 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.403941 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.403998 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.404020 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.404050 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.404069 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.509861 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.509917 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.509929 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.509952 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.509965 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.613011 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.613118 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.613145 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.613171 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.613192 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.710910 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.711002 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.711017 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.711040 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.711055 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4682]: E1210 10:46:46.725066 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.729970 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.730012 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.730025 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.730043 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.730056 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4682]: E1210 10:46:46.746681 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.751802 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.751875 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.751891 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.751909 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.751920 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4682]: E1210 10:46:46.771040 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.775140 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.775176 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.775190 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.775210 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.775227 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4682]: E1210 10:46:46.789809 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.794411 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.794507 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.794524 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.794542 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.794555 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4682]: E1210 10:46:46.815779 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:46Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:46 crc kubenswrapper[4682]: E1210 10:46:46.815959 4682 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.818050 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.818129 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.818155 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.818190 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.818215 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.920917 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.921508 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.921535 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.921568 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:46 crc kubenswrapper[4682]: I1210 10:46:46.921611 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:46Z","lastTransitionTime":"2025-12-10T10:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.025324 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.025382 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.025439 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.025537 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.025575 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.128366 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.128444 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.128505 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.128531 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.128549 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.231444 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.231518 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.231535 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.231558 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.231572 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.334889 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.334962 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.334973 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.334992 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.335008 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.380454 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.380539 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.380547 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:47 crc kubenswrapper[4682]: E1210 10:46:47.380725 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:47 crc kubenswrapper[4682]: E1210 10:46:47.380871 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:47 crc kubenswrapper[4682]: E1210 10:46:47.381096 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.438971 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.439033 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.439056 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.439083 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.439101 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.541331 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.541402 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.541420 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.541446 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.541462 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.644001 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.644069 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.644086 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.644114 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.644131 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.747608 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.747674 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.747692 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.747717 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.747736 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.850408 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.850508 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.850522 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.850542 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.850556 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.953331 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.953423 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.953499 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.953524 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:47 crc kubenswrapper[4682]: I1210 10:46:47.953540 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:47Z","lastTransitionTime":"2025-12-10T10:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.056576 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.056643 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.056665 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.056692 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.056714 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.160340 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.160384 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.160397 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.160413 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.160424 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.263378 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.263431 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.263447 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.263509 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.263528 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.366768 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.366826 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.366835 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.366852 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.366862 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.380234 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:48 crc kubenswrapper[4682]: E1210 10:46:48.380539 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.469502 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.469558 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.469568 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.469582 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.469614 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.573014 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.573083 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.573104 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.573133 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.573155 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.675888 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.675919 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.675929 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.675944 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.675955 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.779401 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.779493 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.779518 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.779549 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.779571 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.883045 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.883172 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.883197 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.883221 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.883238 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.986464 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.986577 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.986600 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.986627 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:48 crc kubenswrapper[4682]: I1210 10:46:48.986648 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:48Z","lastTransitionTime":"2025-12-10T10:46:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.089509 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.089560 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.089572 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.089591 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.089608 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.192699 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.192779 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.192802 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.192835 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.192856 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.296033 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.296065 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.296073 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.296086 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.296096 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.380597 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.380718 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:49 crc kubenswrapper[4682]: E1210 10:46:49.380739 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.380597 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:49 crc kubenswrapper[4682]: E1210 10:46:49.380959 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:49 crc kubenswrapper[4682]: E1210 10:46:49.381077 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.399454 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.399548 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.399570 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.399601 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.399624 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.502429 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.502519 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.502540 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.502565 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.502582 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.605326 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.605380 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.605395 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.605416 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.605431 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.707301 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.707342 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.707352 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.707366 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.707378 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.809545 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.809579 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.809589 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.809604 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.809615 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.911837 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.911918 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.911939 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.911964 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:49 crc kubenswrapper[4682]: I1210 10:46:49.911981 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:49Z","lastTransitionTime":"2025-12-10T10:46:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.014542 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.014707 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.014731 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.014746 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.014755 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.117367 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.117430 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.117454 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.117524 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.117550 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.220695 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.220749 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.220762 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.220780 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.220792 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.323547 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.323602 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.323672 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.323715 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.323745 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.380690 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:50 crc kubenswrapper[4682]: E1210 10:46:50.380900 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.400691 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.420587 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eac5f386-8ee7-453f-9d17-fb57881c7d0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86054212a009f28d22b4dc4f9181fbea05c535d929160f8c05e8d649745c2bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://857d51665f1636c85cef233d00747420b56fcda68ed66d9f4628304e7868242c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://008323d5ab9db5bef027d390691379aad0773741eec1a3d48a7a6a9d23d9fe0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.426624 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.426966 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.427152 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.427306 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.427423 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.440553 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.458700 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.474763 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.503346 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6bd9d948edf86eb37a587e96fc486bcf3f6339af
bc8db0861428dc2f43f5a3c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09edcb96916f3f96683efe6a5b653b0123de9950eeec4c0250da6b1c64194df6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:14Z\\\",\\\"message\\\":\\\"s/externalversions/factory.go:140\\\\nI1210 10:46:14.207093 6383 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207131 6383 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207219 6383 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207510 6383 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207723 6383 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 10:46:14.207848 6383 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 10:46:14.207856 6383 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.207890 6383 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 10:46:14.208334 6383 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:44Z\\\",\\\"message\\\":\\\"er/olm-operator-metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"63b1440a-0908-4cab-8799-012fa1cf0b07\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-operator-lifecycle-manager/olm-operator-metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, 
Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.168\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1210 10:46:44.523716 6802 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/ru
n/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.521741 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 
10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.529816 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.529860 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.529875 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.529896 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.529915 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.540418 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf9794de-1368-4ff9-aa68-73aadaecc504\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c445f7ada23dc8166a355739343a03d78c43a2ac04e2bab918d667ef9c206629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d4df22bd4bb69e72e795eddc4b87532730c9c2c8d3bfaa6b4f41f5c6fe9676e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08a
af09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0d4df22bd4bb69e72e795eddc4b87532730c9c2c8d3bfaa6b4f41f5c6fe9676e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.561652 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.586462 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.605784 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7b979e9cc3b0e9077533cb434014c582b24756abb4f4b3a178ac7be985512fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:29Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1e6b9a20-c1ad-4e44-a7e0-1640aa03e8ee\\\\n2025-12-10T10:45:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1e6b9a20-c1ad-4e44-a7e0-1640aa03e8ee to /host/opt/cni/bin/\\\\n2025-12-10T10:45:44Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:44Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.626886 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.632370 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.632434 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc 
kubenswrapper[4682]: I1210 10:46:50.632452 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.632520 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.632546 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.643986 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 
cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.656434 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.675369 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.699568 4682 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.714491 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.728908 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.734257 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.734385 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.734478 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.734575 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.734659 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.742338 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:50Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.837453 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.837543 4682 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.837563 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.837584 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.837603 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.940892 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.940964 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.940981 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.941006 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:50 crc kubenswrapper[4682]: I1210 10:46:50.941026 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:50Z","lastTransitionTime":"2025-12-10T10:46:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.044333 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.044402 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.044421 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.044447 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.044464 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.147841 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.147970 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.148039 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.148073 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.148133 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.251685 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.251759 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.251779 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.251803 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.251820 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.355729 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.355780 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.355793 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.355812 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.355823 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.380968 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.381004 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.381004 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:51 crc kubenswrapper[4682]: E1210 10:46:51.381461 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:51 crc kubenswrapper[4682]: E1210 10:46:51.381537 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:51 crc kubenswrapper[4682]: E1210 10:46:51.381338 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.459056 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.459127 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.459149 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.459175 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.459193 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.562606 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.562666 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.562684 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.562709 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.562731 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.666415 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.666498 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.666518 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.666539 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.666556 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.769957 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.770039 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.770065 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.770095 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.770117 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.874155 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.874257 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.874285 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.874314 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.874336 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.977031 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.977074 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.977083 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.977098 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:51 crc kubenswrapper[4682]: I1210 10:46:51.977107 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:51Z","lastTransitionTime":"2025-12-10T10:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.080171 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.080224 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.080240 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.080272 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.080304 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.182777 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.182846 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.182869 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.182900 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.182922 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.285720 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.285779 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.285800 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.285851 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.285874 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.380311 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:52 crc kubenswrapper[4682]: E1210 10:46:52.380460 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.387868 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.387899 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.387910 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.387932 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.387941 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.490684 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.490743 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.490759 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.490789 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.490810 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.593735 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.593768 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.593776 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.593791 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.593800 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.696417 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.696466 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.696508 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.696523 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.696535 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.798364 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.798403 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.798420 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.798441 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.798454 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.900430 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.900536 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.900559 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.900588 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:52 crc kubenswrapper[4682]: I1210 10:46:52.900610 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:52Z","lastTransitionTime":"2025-12-10T10:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.003960 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.004034 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.004057 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.004086 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.004108 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.107192 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.107275 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.107300 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.107330 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.107351 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.210308 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.210371 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.210388 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.210415 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.210433 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.313441 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.313573 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.313591 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.313655 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.313673 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.380556 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.380582 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.380701 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:53 crc kubenswrapper[4682]: E1210 10:46:53.380816 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:53 crc kubenswrapper[4682]: E1210 10:46:53.380987 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:53 crc kubenswrapper[4682]: E1210 10:46:53.381075 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.416808 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.416939 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.416961 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.416990 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.417011 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.519356 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.519398 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.519410 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.519425 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.519436 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.621949 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.621997 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.622009 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.622025 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.622059 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.724553 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.724596 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.724623 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.724637 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.724647 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.828091 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.828143 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.828179 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.828195 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.828208 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.931119 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.931622 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.931839 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.932080 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:53 crc kubenswrapper[4682]: I1210 10:46:53.932286 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:53Z","lastTransitionTime":"2025-12-10T10:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.035148 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.035607 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.035624 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.035637 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.035646 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.138877 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.138928 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.138944 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.138962 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.138977 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.242030 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.242088 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.242099 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.242116 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.242133 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.345285 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.345351 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.345369 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.345393 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.345410 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.380937 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:54 crc kubenswrapper[4682]: E1210 10:46:54.381111 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.448811 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.448874 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.448891 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.448916 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.448934 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.551134 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.551190 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.551204 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.551226 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.551239 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.654479 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.654525 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.654535 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.654554 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.654566 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.756584 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.756625 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.756635 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.756651 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.756663 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.858942 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.859005 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.859020 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.859046 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.859060 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.961782 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.961883 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.961895 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.961936 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:54 crc kubenswrapper[4682]: I1210 10:46:54.961949 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:54Z","lastTransitionTime":"2025-12-10T10:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.064622 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.064686 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.064726 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.064766 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.064794 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.166660 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.166706 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.166716 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.166729 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.166739 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.269908 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.269968 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.269986 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.270008 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.270025 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.372974 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.373014 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.373025 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.373044 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.373056 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.380784 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.380788 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.380796 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:55 crc kubenswrapper[4682]: E1210 10:46:55.381048 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:55 crc kubenswrapper[4682]: E1210 10:46:55.381203 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:55 crc kubenswrapper[4682]: E1210 10:46:55.381295 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.475772 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.475832 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.475850 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.475880 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.475898 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.578736 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.578821 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.578843 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.578895 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.578912 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.681905 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.681982 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.682060 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.682084 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.682101 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.785889 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.785933 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.785949 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.785970 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.785982 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.889906 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.889979 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.890020 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.890053 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.890077 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.992679 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.992718 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.992726 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.992740 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:55 crc kubenswrapper[4682]: I1210 10:46:55.992751 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:55Z","lastTransitionTime":"2025-12-10T10:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.095962 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.096062 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.096088 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.096118 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.096139 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:56Z","lastTransitionTime":"2025-12-10T10:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.199438 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.199566 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.199594 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.199625 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.199647 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:56Z","lastTransitionTime":"2025-12-10T10:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.302701 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.302947 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.302973 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.303138 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.303161 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:56Z","lastTransitionTime":"2025-12-10T10:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.380594 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:56 crc kubenswrapper[4682]: E1210 10:46:56.380737 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.405849 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.405924 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.405942 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.405972 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.405996 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:56Z","lastTransitionTime":"2025-12-10T10:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.508516 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.508586 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.508602 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.508624 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.508639 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:56Z","lastTransitionTime":"2025-12-10T10:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.611280 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.611346 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.611358 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.611379 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.611395 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:56Z","lastTransitionTime":"2025-12-10T10:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.713805 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.713876 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.713892 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.713911 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.713924 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:56Z","lastTransitionTime":"2025-12-10T10:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.816862 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.816963 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.816990 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.817043 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.817070 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:56Z","lastTransitionTime":"2025-12-10T10:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.919652 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.919711 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.919726 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.919748 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:56 crc kubenswrapper[4682]: I1210 10:46:56.919761 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:56Z","lastTransitionTime":"2025-12-10T10:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.019703 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.019765 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.019777 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.019798 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.019828 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:57Z","lastTransitionTime":"2025-12-10T10:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:57 crc kubenswrapper[4682]: E1210 10:46:57.034196 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.038902 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.038955 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.038969 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.038989 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.039003 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:57Z","lastTransitionTime":"2025-12-10T10:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:57 crc kubenswrapper[4682]: E1210 10:46:57.053051 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.056604 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.056643 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.056657 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.056679 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.056695 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:57Z","lastTransitionTime":"2025-12-10T10:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:57 crc kubenswrapper[4682]: E1210 10:46:57.068450 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.072587 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.072649 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.072663 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.072686 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.072705 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:57Z","lastTransitionTime":"2025-12-10T10:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:57 crc kubenswrapper[4682]: E1210 10:46:57.087692 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.091442 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.091502 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.091515 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.091536 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.091552 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:57Z","lastTransitionTime":"2025-12-10T10:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:57 crc kubenswrapper[4682]: E1210 10:46:57.106174 4682 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f31d3580-1486-4f05-9f92-ad8676a17c6a\\\",\\\"systemUUID\\\":\\\"1e536030-2097-46d6-9de1-5c5492a935f2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: E1210 10:46:57.106348 4682 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.108728 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.108758 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.108779 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.108804 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.108820 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:57Z","lastTransitionTime":"2025-12-10T10:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.211355 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.211421 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.211450 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.211510 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.211532 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:57Z","lastTransitionTime":"2025-12-10T10:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.314310 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.314371 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.314385 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.314416 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.314430 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:57Z","lastTransitionTime":"2025-12-10T10:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.380731 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:57 crc kubenswrapper[4682]: E1210 10:46:57.380917 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.380736 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.380979 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:57 crc kubenswrapper[4682]: E1210 10:46:57.381358 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:57 crc kubenswrapper[4682]: E1210 10:46:57.381848 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.382267 4682 scope.go:117] "RemoveContainer" containerID="6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9" Dec 10 10:46:57 crc kubenswrapper[4682]: E1210 10:46:57.382576 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.401089 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},
{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.417018 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.417093 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.417117 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.417151 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.417175 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:57Z","lastTransitionTime":"2025-12-10T10:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.423868 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.443002 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.464953 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.482180 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.518938 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c
05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.520652 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.520713 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.520726 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.520749 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.520764 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:57Z","lastTransitionTime":"2025-12-10T10:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.534041 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.547589 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.562339 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.574341 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.588864 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.603693 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eac5f386-8ee7-453f-9d17-fb57881c7d0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86054212a009f28d22b4dc4f9181fbea05c535d929160f8c05e8d649745c2bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://857d51665f1636c85cef233d00747420b56fcda68ed66d9f4628304e7868242c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://008323d5ab9db5bef027d390691379aad0773741eec1a3d48a7a6a9d23d9fe0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.619219 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 
2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.623535 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.623604 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.623616 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.623640 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.623652 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:57Z","lastTransitionTime":"2025-12-10T10:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.637183 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7b979e9cc3b0e9077533cb434014c582b24756abb4f4b3a178ac7be985512fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:29Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1e6b9a20-c1ad-4e44-a7e0-1640aa03e8ee\\\\n2025-12-10T10:45:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1e6b9a20-c1ad-4e44-a7e0-1640aa03e8ee to /host/opt/cni/bin/\\\\n2025-12-10T10:45:44Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:44Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:29Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.654639 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.677554 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:44Z\\\",\\\"message\\\":\\\"er/olm-operator-metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"63b1440a-0908-4cab-8799-012fa1cf0b07\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-operator-lifecycle-manager/olm-operator-metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.168\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1210 10:46:44.523716 6802 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.689256 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.704194 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf9794de-1368-4ff9-aa68-73aadaecc504\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c445f7ada23dc8166a355739343a03d78c43a2ac04e2bab918d667ef9c206629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d4df22bd4bb69e72e795eddc4b87532730c9c2c8d3bfaa6b4f41f5c6fe9676e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0d4df22bd4bb69e72e795eddc4b87532730c9c2c8d3bfaa6b4f41f5c6fe9676e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.719038 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:46:57Z is after 2025-08-24T17:21:41Z" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.725527 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.725565 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.725573 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.725589 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.725600 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:57Z","lastTransitionTime":"2025-12-10T10:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.828580 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.828640 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.828658 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.828683 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.828701 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:57Z","lastTransitionTime":"2025-12-10T10:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.931677 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.931727 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.931747 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.931771 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:57 crc kubenswrapper[4682]: I1210 10:46:57.931789 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:57Z","lastTransitionTime":"2025-12-10T10:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.035027 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.035069 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.035084 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.035101 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.035114 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:58Z","lastTransitionTime":"2025-12-10T10:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.138675 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.138738 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.138761 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.138786 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.138803 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:58Z","lastTransitionTime":"2025-12-10T10:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.241175 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.241228 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.241239 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.241259 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.241271 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:58Z","lastTransitionTime":"2025-12-10T10:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.344253 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.344374 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.344383 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.344399 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.344413 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:58Z","lastTransitionTime":"2025-12-10T10:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.380769 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:46:58 crc kubenswrapper[4682]: E1210 10:46:58.381207 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.447777 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.447853 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.447873 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.447899 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.447918 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:58Z","lastTransitionTime":"2025-12-10T10:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.550100 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.550152 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.550164 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.550183 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.550200 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:58Z","lastTransitionTime":"2025-12-10T10:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.653215 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.653272 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.653280 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.653318 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.653334 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:58Z","lastTransitionTime":"2025-12-10T10:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.756275 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.756339 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.756352 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.756374 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.756388 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:58Z","lastTransitionTime":"2025-12-10T10:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.859779 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.859898 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.859922 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.859951 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.859972 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:58Z","lastTransitionTime":"2025-12-10T10:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.962794 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.962900 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.962928 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.963005 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:58 crc kubenswrapper[4682]: I1210 10:46:58.963029 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:58Z","lastTransitionTime":"2025-12-10T10:46:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.065697 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.065794 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.065820 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.065859 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.065886 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:59Z","lastTransitionTime":"2025-12-10T10:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.168553 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.168592 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.168604 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.168620 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.168631 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:59Z","lastTransitionTime":"2025-12-10T10:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.272281 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.272330 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.272345 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.272368 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.272383 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:59Z","lastTransitionTime":"2025-12-10T10:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.375313 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.375388 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.375410 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.375439 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.375623 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:59Z","lastTransitionTime":"2025-12-10T10:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.380637 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.380680 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.380747 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:46:59 crc kubenswrapper[4682]: E1210 10:46:59.380801 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:46:59 crc kubenswrapper[4682]: E1210 10:46:59.380894 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:46:59 crc kubenswrapper[4682]: E1210 10:46:59.380950 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.484450 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.484589 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.484613 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.484645 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.484667 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:59Z","lastTransitionTime":"2025-12-10T10:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.588511 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.588572 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.588596 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.588627 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.588655 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:59Z","lastTransitionTime":"2025-12-10T10:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:59 crc kubenswrapper[4682]: E1210 10:46:59.614853 4682 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:46:59 crc kubenswrapper[4682]: E1210 10:46:59.614964 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs podName:f308e36d-4856-4306-adec-390e40daaee3 nodeName:}" failed. No retries permitted until 2025-12-10 10:48:03.614931924 +0000 UTC m=+163.935142704 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs") pod "network-metrics-daemon-6c5qg" (UID: "f308e36d-4856-4306-adec-390e40daaee3") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.614662 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs\") pod \"network-metrics-daemon-6c5qg\" (UID: \"f308e36d-4856-4306-adec-390e40daaee3\") " pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.691845 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.691905 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.691928 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.691956 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.691977 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:59Z","lastTransitionTime":"2025-12-10T10:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.794241 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.794272 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.794282 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.794297 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.794309 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:59Z","lastTransitionTime":"2025-12-10T10:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.897686 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.898106 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.898355 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.898607 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.898835 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:46:59Z","lastTransitionTime":"2025-12-10T10:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.903092 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:46:59 crc kubenswrapper[4682]: I1210 10:46:59.904700 4682 scope.go:117] "RemoveContainer" containerID="6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9" Dec 10 10:46:59 crc kubenswrapper[4682]: E1210 10:46:59.905089 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.002930 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.003005 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.003027 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.003057 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.003081 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:00Z","lastTransitionTime":"2025-12-10T10:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.105849 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.105903 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.105915 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.105932 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.105945 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:00Z","lastTransitionTime":"2025-12-10T10:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.209584 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.209640 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.209652 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.209670 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.209682 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:00Z","lastTransitionTime":"2025-12-10T10:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.312132 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.312163 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.312172 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.312185 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.312194 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:00Z","lastTransitionTime":"2025-12-10T10:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.380885 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:00 crc kubenswrapper[4682]: E1210 10:47:00.381014 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.402940 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d9897c5-0762-48b6-9d2c-92bff335e843\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e3f69c3cfdf7398e5c69b1842cabd56b40bc7da0b43a5cad458ba2e2b8bc05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://acefd3611f57ebf49c983356dee202fb9a485e73bd01dc9170fe9896f7ab4efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3afb15a6ad7c3b803551f5557e12b9c77990050b22fe827512d210e1bec4672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"
startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ca7242d1be672473ab65b69f50eef3640a277c05cdcc2b86ccad7e199e951b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef1a8d715a993ab316dd536dedc841cac6d732524fe05b0c08ac25aebb4bad6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://471079ef18f6e9d70d982d429b115c498b592a4f228f3c1a7348a143c59c3145\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a3d148045a69dfa
17243cee5763a90e263565714acf7da9cbdd6bc0759e3aff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fbbf9ed3b1043df27901c00f2d28c55e60b22d3e454c46919977edcbe79ccce6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.415380 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.415448 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.415495 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.415523 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.415540 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:00Z","lastTransitionTime":"2025-12-10T10:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.418594 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.436837 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9b7774472c392691988290a67bb87eb133f8f0bfc4635c2ef4ca51b348fbe13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1179639a9c0f92b9dfa149278d1ea8cf7f14538f53c97bd018466d28901cee8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.448890 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f308e36d-4856-4306-adec-390e40daaee3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g94xk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6c5qg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.464978 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dabbdff6-ec4e-4092-8603-9393e15c5fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://165bea93910533476f1f7967c886d75cf713071ffc8ab059d57412eb76ee49a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1cfa55e2c1cdde7e0cfd2b5a6c457ec53f53daebc022ec2a15c570cb2aaa5b18\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bb225f30a6be69363ae60dd82afbd34eb5c5d36a0b450515202074e19cb1f3e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.481676 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eac5f386-8ee7-453f-9d17-fb57881c7d0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86054212a009f28d22b4dc4f9181fbea05c535d929160f8c05e8d649745c2bb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://857d51665f1636c85cef233d00747420b56fcda68ed66d9f4628304e7868242c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://008323d5ab9db5bef027d390691379aad0773741eec1a3d48a7a6a9d23d9fe0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a928db56f54afba87cd6f7e94ba7ec4f03e09db1dc8352ee6d73eacbfd6a294\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.498846 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ba3b7cb988de381f9a67d29e57fce550a27192139306d152c0b130398b56ac9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.514774 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.517766 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.517802 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.517810 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.517823 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.517833 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:00Z","lastTransitionTime":"2025-12-10T10:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.529928 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-9s7rc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9ecb90c-92b0-4a1b-b7a2-21da0f41d594\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3103899209fac81786f61cb3b9f175f58e493a61c051951dab560ffc831d5b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rxzcx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-9s7rc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.560665 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:44Z\\\",\\\"message\\\":\\\"er/olm-operator-metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"63b1440a-0908-4cab-8799-012fa1cf0b07\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-operator-lifecycle-manager/olm-operator-metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.168\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1210 10:46:44.523716 6802 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:46:43Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk8dd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vmhkf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.582037 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0490123-88b1-4c35-ad45-3cf66d5d26e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7dddaeaa97729699a9d9679123b2339dda39116612f39087cbd162920fc8930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdd0ee1d6d60908d4743fe4954f999752590b1ff4e61941f899526873ac6c60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbkp4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wxh8p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.599621 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf9794de-1368-4ff9-aa68-73aadaecc504\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c445f7ada23dc8166a355739343a03d78c43a2ac04e2bab918d667ef9c206629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0d4df22bd4bb69e72e795eddc4b87532730c9c2c8d3bfaa6b4f41f5c6fe9676e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0d4df22bd4bb69e72e795eddc4b87532730c9c2c8d3bfaa6b4f41f5c6fe9676e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.620126 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.620911 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.620947 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.620960 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.620975 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.620985 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:00Z","lastTransitionTime":"2025-12-10T10:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.635666 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bafd4238ae9e27f12d485b30de32b1d02f7aad0ac43ba96b7a1a01146cf682aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.655125 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zs6ss" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a005c959-3805-4e15-aa3a-7093815e03b8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a7b979e9cc3b0e9077533cb434014c582b24756abb4f4b3a178ac7be985512fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T10:46:29Z\\\",\\\"message\\\":\\\"2025-12-10T10:45:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1e6b9a20-c1ad-4e44-a7e0-1640aa03e8ee\\\\n2025-12-10T10:45:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1e6b9a20-c1ad-4e44-a7e0-1640aa03e8ee to /host/opt/cni/bin/\\\\n2025-12-10T10:45:44Z [verbose] multus-daemon started\\\\n2025-12-10T10:45:44Z [verbose] Readiness Indicator file check\\\\n2025-12-10T10:46:29Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:46:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wd7rh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zs6ss\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.670958 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-v27lh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19919360-1e01-4b1c-a2fe-d7b0f7b582c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e781e17e4a897d87ed3227502eab1eb71b527c970d8da918bb370ac53034843\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://918bb6ccf87e6a1d36fe9f91bef34d5c77e62d175217d8a11918c562b3339656\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee7274e9232229b14c46c0e9d878eabe0d0808ddfcda532d380e2bcc835e9cd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8836c0aa5abce02ef11f055a68e4ad1e39eff301d818ea545e3e9578b4a168f9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ded22c30b19601f801397e1f2ff2cbf7d02487e37ad031bb424ba894749a0888\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfd3bc9c072bb7b0f689bbe8b3f32efdb50025474993ffdb08272c0b25c5be74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b700b97b953a50c6086b4a98c646f5aa75a592ff976b7dc42f9f76e0414fefc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmz79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-v27lh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.686732 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"412547b9-dcab-487e-a6fc-bb7e3fe2b324\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:46:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1210 10:45:39.155809 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1210 10:45:39.155936 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 10:45:39.156961 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1407541289/tls.crt::/tmp/serving-cert-1407541289/tls.key\\\\\\\"\\\\nI1210 10:45:39.778290 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 10:45:40.824045 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 10:45:40.824082 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 10:45:40.824106 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 10:45:40.824112 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 10:45:40.832972 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 10:45:40.832997 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833002 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 10:45:40.833007 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1210 10:45:40.833010 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 10:45:40.833013 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 10:45:40.833016 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1210 10:45:40.833187 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1210 10:45:40.840386 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T10:45:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T10:45:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.699484 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xkwtt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9072c21-61ad-489f-8603-5f5699ad5d31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7778ef523bfdc44612b6c266c9340f09182bd3c3ccb4852bec3e5e2afbeb15da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jnw8q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xkwtt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.716171 4682 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b504d5b4-49dc-499d-b17c-957131ba411e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T10:45:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://982c9bc1bed2d92fd4347c346f109a3b1ee8ea92e5a4aee415538f8cd420a77c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T10:45:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rvhsf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T10:45:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-58skk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T10:47:00Z is after 2025-08-24T17:21:41Z" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.723946 4682 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.724003 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.724021 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.724044 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.724062 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:00Z","lastTransitionTime":"2025-12-10T10:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.826877 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.826915 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.826927 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.826944 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.826957 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:00Z","lastTransitionTime":"2025-12-10T10:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.929788 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.929868 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.929891 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.929933 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:00 crc kubenswrapper[4682]: I1210 10:47:00.929968 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:00Z","lastTransitionTime":"2025-12-10T10:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.032985 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.033353 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.033646 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.033854 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.034005 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:01Z","lastTransitionTime":"2025-12-10T10:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.136236 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.136266 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.136274 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.136287 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.136296 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:01Z","lastTransitionTime":"2025-12-10T10:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.238435 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.238487 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.238499 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.238514 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.238525 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:01Z","lastTransitionTime":"2025-12-10T10:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.340961 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.340994 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.341002 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.341015 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.341023 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:01Z","lastTransitionTime":"2025-12-10T10:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.381075 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:01 crc kubenswrapper[4682]: E1210 10:47:01.381442 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.381146 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:01 crc kubenswrapper[4682]: E1210 10:47:01.381687 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.381128 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:01 crc kubenswrapper[4682]: E1210 10:47:01.381949 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.443618 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.443687 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.443710 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.443737 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.443758 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:01Z","lastTransitionTime":"2025-12-10T10:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.546299 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.546342 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.546353 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.546368 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.546380 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:01Z","lastTransitionTime":"2025-12-10T10:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.648453 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.648511 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.648521 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.648538 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.648549 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:01Z","lastTransitionTime":"2025-12-10T10:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.751044 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.751095 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.751107 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.751121 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.751131 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:01Z","lastTransitionTime":"2025-12-10T10:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.854878 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.854938 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.854950 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.854971 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.854989 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:01Z","lastTransitionTime":"2025-12-10T10:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.957376 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.957449 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.957466 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.957525 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:01 crc kubenswrapper[4682]: I1210 10:47:01.957561 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:01Z","lastTransitionTime":"2025-12-10T10:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.060289 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.060339 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.060350 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.060366 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.060378 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:02Z","lastTransitionTime":"2025-12-10T10:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.163661 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.163722 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.163738 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.163770 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.163786 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:02Z","lastTransitionTime":"2025-12-10T10:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.266024 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.266625 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.266647 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.266668 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.266683 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:02Z","lastTransitionTime":"2025-12-10T10:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.369635 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.369708 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.369725 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.369749 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.369766 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:02Z","lastTransitionTime":"2025-12-10T10:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.380015 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:02 crc kubenswrapper[4682]: E1210 10:47:02.380244 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.472617 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.472665 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.472691 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.472722 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.472744 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:02Z","lastTransitionTime":"2025-12-10T10:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.576309 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.576364 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.576378 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.576399 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.576417 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:02Z","lastTransitionTime":"2025-12-10T10:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.678687 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.678743 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.678759 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.678780 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.678796 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:02Z","lastTransitionTime":"2025-12-10T10:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.780726 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.780763 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.780795 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.780814 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.780825 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:02Z","lastTransitionTime":"2025-12-10T10:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.883265 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.883318 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.883329 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.883355 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.883377 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:02Z","lastTransitionTime":"2025-12-10T10:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.985437 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.985728 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.985847 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.985948 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:02 crc kubenswrapper[4682]: I1210 10:47:02.986026 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:02Z","lastTransitionTime":"2025-12-10T10:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.089297 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.089375 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.089394 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.089418 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.089434 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:03Z","lastTransitionTime":"2025-12-10T10:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.193008 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.193290 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.193402 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.193559 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.193674 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:03Z","lastTransitionTime":"2025-12-10T10:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.297291 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.297377 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.297414 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.297452 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.297526 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:03Z","lastTransitionTime":"2025-12-10T10:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.380716 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.380817 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.380827 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:03 crc kubenswrapper[4682]: E1210 10:47:03.380946 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:03 crc kubenswrapper[4682]: E1210 10:47:03.381080 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:03 crc kubenswrapper[4682]: E1210 10:47:03.381182 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.400098 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.400352 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.400591 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.400802 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.400971 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:03Z","lastTransitionTime":"2025-12-10T10:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.503862 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.504228 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.504379 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.504573 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.504731 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:03Z","lastTransitionTime":"2025-12-10T10:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.608242 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.608302 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.608326 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.608353 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.608374 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:03Z","lastTransitionTime":"2025-12-10T10:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.711153 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.711199 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.711215 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.711240 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.711257 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:03Z","lastTransitionTime":"2025-12-10T10:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.814317 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.814385 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.814409 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.814436 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.814456 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:03Z","lastTransitionTime":"2025-12-10T10:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.917189 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.917229 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.917242 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.917257 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:03 crc kubenswrapper[4682]: I1210 10:47:03.917269 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:03Z","lastTransitionTime":"2025-12-10T10:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.019886 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.019924 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.019932 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.019947 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.019956 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:04Z","lastTransitionTime":"2025-12-10T10:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.122545 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.123071 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.123097 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.123128 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.123152 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:04Z","lastTransitionTime":"2025-12-10T10:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.225539 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.225573 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.225583 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.225600 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.225610 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:04Z","lastTransitionTime":"2025-12-10T10:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.328379 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.328532 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.328560 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.328586 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.328602 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:04Z","lastTransitionTime":"2025-12-10T10:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.381042 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:04 crc kubenswrapper[4682]: E1210 10:47:04.381351 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.431118 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.431196 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.431223 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.431251 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.431271 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:04Z","lastTransitionTime":"2025-12-10T10:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.534565 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.534625 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.534635 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.534659 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.534671 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:04Z","lastTransitionTime":"2025-12-10T10:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.637702 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.637766 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.637783 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.637809 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.637827 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:04Z","lastTransitionTime":"2025-12-10T10:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.741356 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.741456 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.741530 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.741566 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.741590 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:04Z","lastTransitionTime":"2025-12-10T10:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.844425 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.844511 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.844529 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.844553 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.844570 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:04Z","lastTransitionTime":"2025-12-10T10:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.947754 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.947837 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.947865 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.947897 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:04 crc kubenswrapper[4682]: I1210 10:47:04.947922 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:04Z","lastTransitionTime":"2025-12-10T10:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.051220 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.051284 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.051303 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.051328 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.051378 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:05Z","lastTransitionTime":"2025-12-10T10:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.154626 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.154718 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.154740 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.154774 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.154797 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:05Z","lastTransitionTime":"2025-12-10T10:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.258274 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.258323 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.258338 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.258361 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.258377 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:05Z","lastTransitionTime":"2025-12-10T10:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.380394 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.380463 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:05 crc kubenswrapper[4682]: E1210 10:47:05.380647 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:05 crc kubenswrapper[4682]: E1210 10:47:05.380942 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.380992 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:05 crc kubenswrapper[4682]: E1210 10:47:05.381233 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.382775 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.382847 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.382870 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.382895 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.382917 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:05Z","lastTransitionTime":"2025-12-10T10:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.486604 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.486667 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.486680 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.486703 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.486717 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:05Z","lastTransitionTime":"2025-12-10T10:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.589969 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.590044 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.590054 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.590089 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.590101 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:05Z","lastTransitionTime":"2025-12-10T10:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.693412 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.693515 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.693529 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.693553 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.693568 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:05Z","lastTransitionTime":"2025-12-10T10:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.796242 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.796301 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.796315 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.796336 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.796349 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:05Z","lastTransitionTime":"2025-12-10T10:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.903204 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.903277 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.903291 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.903311 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:05 crc kubenswrapper[4682]: I1210 10:47:05.903327 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:05Z","lastTransitionTime":"2025-12-10T10:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.006825 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.006945 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.006969 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.007002 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.007025 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:06Z","lastTransitionTime":"2025-12-10T10:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.109927 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.109970 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.109978 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.110014 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.110025 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:06Z","lastTransitionTime":"2025-12-10T10:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.213312 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.213376 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.213389 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.213412 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.213429 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:06Z","lastTransitionTime":"2025-12-10T10:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.319697 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.319757 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.319768 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.319786 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.319796 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:06Z","lastTransitionTime":"2025-12-10T10:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.380719 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:06 crc kubenswrapper[4682]: E1210 10:47:06.380864 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.423515 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.423577 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.423590 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.423615 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.423629 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:06Z","lastTransitionTime":"2025-12-10T10:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.526627 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.526685 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.526695 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.526720 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.526732 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:06Z","lastTransitionTime":"2025-12-10T10:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.629587 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.629637 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.629648 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.629677 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.629690 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:06Z","lastTransitionTime":"2025-12-10T10:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.732820 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.732859 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.732871 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.732888 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.732899 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:06Z","lastTransitionTime":"2025-12-10T10:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.835890 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.836434 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.836580 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.836678 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.836777 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:06Z","lastTransitionTime":"2025-12-10T10:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.940544 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.940959 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.941408 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.941879 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:06 crc kubenswrapper[4682]: I1210 10:47:06.942314 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:06Z","lastTransitionTime":"2025-12-10T10:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.046110 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.046170 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.046185 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.046206 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.046219 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:07Z","lastTransitionTime":"2025-12-10T10:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.136433 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.136678 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.136712 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.136733 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.136743 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:07Z","lastTransitionTime":"2025-12-10T10:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.166414 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.166511 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.166524 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.166547 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.166563 4682 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T10:47:07Z","lastTransitionTime":"2025-12-10T10:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.208633 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc"] Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.209349 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.211668 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.212013 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.212115 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.212303 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.255724 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-9s7rc" podStartSLOduration=87.255701185 podStartE2EDuration="1m27.255701185s" podCreationTimestamp="2025-12-10 10:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:07.241143049 +0000 UTC m=+107.561353859" watchObservedRunningTime="2025-12-10 10:47:07.255701185 +0000 UTC m=+107.575911945" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.290337 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=87.290314757 podStartE2EDuration="1m27.290314757s" podCreationTimestamp="2025-12-10 10:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:07.274357869 +0000 UTC m=+107.594568629" watchObservedRunningTime="2025-12-10 10:47:07.290314757 +0000 UTC m=+107.610525527" Dec 10 
10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.304881 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=59.304861382 podStartE2EDuration="59.304861382s" podCreationTimestamp="2025-12-10 10:46:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:07.290961128 +0000 UTC m=+107.611171918" watchObservedRunningTime="2025-12-10 10:47:07.304861382 +0000 UTC m=+107.625072132" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.322059 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-zs6ss" podStartSLOduration=87.32203806 podStartE2EDuration="1m27.32203806s" podCreationTimestamp="2025-12-10 10:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:07.322017339 +0000 UTC m=+107.642228109" watchObservedRunningTime="2025-12-10 10:47:07.32203806 +0000 UTC m=+107.642248820" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.366981 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-v27lh" podStartSLOduration=86.366957793 podStartE2EDuration="1m26.366957793s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:07.342045374 +0000 UTC m=+107.662256134" watchObservedRunningTime="2025-12-10 10:47:07.366957793 +0000 UTC m=+107.687168543" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.380819 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:07 crc kubenswrapper[4682]: E1210 10:47:07.381461 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.381127 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:07 crc kubenswrapper[4682]: E1210 10:47:07.381677 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.381094 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:07 crc kubenswrapper[4682]: E1210 10:47:07.381946 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.400090 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wxh8p" podStartSLOduration=86.400069819 podStartE2EDuration="1m26.400069819s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:07.384259814 +0000 UTC m=+107.704470564" watchObservedRunningTime="2025-12-10 10:47:07.400069819 +0000 UTC m=+107.720280569" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.400243 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=33.400238444 podStartE2EDuration="33.400238444s" podCreationTimestamp="2025-12-10 10:46:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:07.399983846 +0000 UTC m=+107.720194616" watchObservedRunningTime="2025-12-10 10:47:07.400238444 +0000 UTC m=+107.720449194" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.402560 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb7341ec-dcaf-45c5-b563-20f21e99a14c-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.402628 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb7341ec-dcaf-45c5-b563-20f21e99a14c-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.402665 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/eb7341ec-dcaf-45c5-b563-20f21e99a14c-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.402690 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/eb7341ec-dcaf-45c5-b563-20f21e99a14c-service-ca\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: 
I1210 10:47:07.402720 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/eb7341ec-dcaf-45c5-b563-20f21e99a14c-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.463834 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=86.463812782 podStartE2EDuration="1m26.463812782s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:07.463670667 +0000 UTC m=+107.783881437" watchObservedRunningTime="2025-12-10 10:47:07.463812782 +0000 UTC m=+107.784023542" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.475660 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-xkwtt" podStartSLOduration=87.475642812 podStartE2EDuration="1m27.475642812s" podCreationTimestamp="2025-12-10 10:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:07.475552189 +0000 UTC m=+107.795762959" watchObservedRunningTime="2025-12-10 10:47:07.475642812 +0000 UTC m=+107.795853562" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.500326 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podStartSLOduration=87.500304123 podStartE2EDuration="1m27.500304123s" podCreationTimestamp="2025-12-10 10:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:07.487659767 +0000 UTC m=+107.807870527" watchObservedRunningTime="2025-12-10 10:47:07.500304123 +0000 UTC m=+107.820514883" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.503316 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/eb7341ec-dcaf-45c5-b563-20f21e99a14c-service-ca\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.503363 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/eb7341ec-dcaf-45c5-b563-20f21e99a14c-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.503417 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb7341ec-dcaf-45c5-b563-20f21e99a14c-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.503450 4682 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb7341ec-dcaf-45c5-b563-20f21e99a14c-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.503496 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/eb7341ec-dcaf-45c5-b563-20f21e99a14c-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.503544 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/eb7341ec-dcaf-45c5-b563-20f21e99a14c-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.503555 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/eb7341ec-dcaf-45c5-b563-20f21e99a14c-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.504325 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/eb7341ec-dcaf-45c5-b563-20f21e99a14c-service-ca\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.509884 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb7341ec-dcaf-45c5-b563-20f21e99a14c-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.518797 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb7341ec-dcaf-45c5-b563-20f21e99a14c-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-shllc\" (UID: \"eb7341ec-dcaf-45c5-b563-20f21e99a14c\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.524785 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.543946 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=84.543927546 podStartE2EDuration="1m24.543927546s" podCreationTimestamp="2025-12-10 10:45:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:07.524510469 +0000 UTC m=+107.844721229" watchObservedRunningTime="2025-12-10 10:47:07.543927546 +0000 UTC m=+107.864138296" Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.954606 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" event={"ID":"eb7341ec-dcaf-45c5-b563-20f21e99a14c","Type":"ContainerStarted","Data":"6eeee49dc0a74481ae1673f77fd5a1708a624b5c3bf2727bffcb6a8b852699c3"} Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.954668 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" event={"ID":"eb7341ec-dcaf-45c5-b563-20f21e99a14c","Type":"ContainerStarted","Data":"258af3843b0f2444757e35692aaf596502d3604119b3db9c4746d473ca41dd7c"} Dec 10 10:47:07 crc kubenswrapper[4682]: I1210 10:47:07.966849 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-shllc" podStartSLOduration=87.966826508 podStartE2EDuration="1m27.966826508s" podCreationTimestamp="2025-12-10 10:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:07.966187248 +0000 UTC m=+108.286397998" watchObservedRunningTime="2025-12-10 10:47:07.966826508 +0000 UTC m=+108.287037268" Dec 10 10:47:08 crc kubenswrapper[4682]: I1210 10:47:08.380596 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:08 crc kubenswrapper[4682]: E1210 10:47:08.380747 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:09 crc kubenswrapper[4682]: I1210 10:47:09.380549 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:09 crc kubenswrapper[4682]: I1210 10:47:09.380612 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:09 crc kubenswrapper[4682]: I1210 10:47:09.380549 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:09 crc kubenswrapper[4682]: E1210 10:47:09.380689 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:09 crc kubenswrapper[4682]: E1210 10:47:09.380773 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:09 crc kubenswrapper[4682]: E1210 10:47:09.380854 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:10 crc kubenswrapper[4682]: I1210 10:47:10.381063 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:10 crc kubenswrapper[4682]: E1210 10:47:10.382603 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:11 crc kubenswrapper[4682]: I1210 10:47:11.381223 4682 scope.go:117] "RemoveContainer" containerID="6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9" Dec 10 10:47:11 crc kubenswrapper[4682]: E1210 10:47:11.381378 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" Dec 10 10:47:11 crc kubenswrapper[4682]: I1210 10:47:11.381567 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:11 crc kubenswrapper[4682]: E1210 10:47:11.381620 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:11 crc kubenswrapper[4682]: I1210 10:47:11.381735 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:11 crc kubenswrapper[4682]: E1210 10:47:11.381790 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:11 crc kubenswrapper[4682]: I1210 10:47:11.381881 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:11 crc kubenswrapper[4682]: E1210 10:47:11.381924 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:12 crc kubenswrapper[4682]: I1210 10:47:12.380889 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:12 crc kubenswrapper[4682]: E1210 10:47:12.381323 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:13 crc kubenswrapper[4682]: I1210 10:47:13.380738 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:13 crc kubenswrapper[4682]: I1210 10:47:13.380828 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:13 crc kubenswrapper[4682]: E1210 10:47:13.380880 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:13 crc kubenswrapper[4682]: I1210 10:47:13.380738 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:13 crc kubenswrapper[4682]: E1210 10:47:13.381007 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:13 crc kubenswrapper[4682]: E1210 10:47:13.381193 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:14 crc kubenswrapper[4682]: I1210 10:47:14.379996 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:14 crc kubenswrapper[4682]: E1210 10:47:14.380152 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:15 crc kubenswrapper[4682]: I1210 10:47:15.380291 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:15 crc kubenswrapper[4682]: I1210 10:47:15.380366 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:15 crc kubenswrapper[4682]: E1210 10:47:15.380454 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:15 crc kubenswrapper[4682]: I1210 10:47:15.380303 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:15 crc kubenswrapper[4682]: E1210 10:47:15.380606 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:15 crc kubenswrapper[4682]: E1210 10:47:15.380908 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:16 crc kubenswrapper[4682]: I1210 10:47:16.380120 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:16 crc kubenswrapper[4682]: E1210 10:47:16.380268 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:16 crc kubenswrapper[4682]: I1210 10:47:16.985034 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zs6ss_a005c959-3805-4e15-aa3a-7093815e03b8/kube-multus/1.log" Dec 10 10:47:16 crc kubenswrapper[4682]: I1210 10:47:16.985995 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zs6ss_a005c959-3805-4e15-aa3a-7093815e03b8/kube-multus/0.log" Dec 10 10:47:16 crc kubenswrapper[4682]: I1210 10:47:16.986057 4682 generic.go:334] "Generic (PLEG): container finished" podID="a005c959-3805-4e15-aa3a-7093815e03b8" containerID="a7b979e9cc3b0e9077533cb434014c582b24756abb4f4b3a178ac7be985512fd" exitCode=1 Dec 10 10:47:16 crc kubenswrapper[4682]: I1210 10:47:16.986091 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zs6ss" event={"ID":"a005c959-3805-4e15-aa3a-7093815e03b8","Type":"ContainerDied","Data":"a7b979e9cc3b0e9077533cb434014c582b24756abb4f4b3a178ac7be985512fd"} Dec 10 10:47:16 crc kubenswrapper[4682]: I1210 10:47:16.986136 4682 scope.go:117] "RemoveContainer" containerID="18ddd4fc3d36f693aa6181a5ed68b8344aa07fcdb140f891e4fe959a3b65b873" Dec 10 10:47:16 crc kubenswrapper[4682]: I1210 10:47:16.986791 4682 scope.go:117] "RemoveContainer" containerID="a7b979e9cc3b0e9077533cb434014c582b24756abb4f4b3a178ac7be985512fd" Dec 10 10:47:16 crc kubenswrapper[4682]: E1210 10:47:16.987084 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-zs6ss_openshift-multus(a005c959-3805-4e15-aa3a-7093815e03b8)\"" pod="openshift-multus/multus-zs6ss" podUID="a005c959-3805-4e15-aa3a-7093815e03b8" Dec 10 10:47:17 crc kubenswrapper[4682]: I1210 10:47:17.379940 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:17 crc kubenswrapper[4682]: I1210 10:47:17.379940 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:17 crc kubenswrapper[4682]: E1210 10:47:17.380245 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:17 crc kubenswrapper[4682]: I1210 10:47:17.379940 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:17 crc kubenswrapper[4682]: E1210 10:47:17.380112 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:17 crc kubenswrapper[4682]: E1210 10:47:17.380331 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:17 crc kubenswrapper[4682]: I1210 10:47:17.991465 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zs6ss_a005c959-3805-4e15-aa3a-7093815e03b8/kube-multus/1.log" Dec 10 10:47:18 crc kubenswrapper[4682]: I1210 10:47:18.380189 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:18 crc kubenswrapper[4682]: E1210 10:47:18.380314 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:19 crc kubenswrapper[4682]: I1210 10:47:19.381172 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:19 crc kubenswrapper[4682]: I1210 10:47:19.381173 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:19 crc kubenswrapper[4682]: E1210 10:47:19.381529 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:19 crc kubenswrapper[4682]: E1210 10:47:19.381653 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:19 crc kubenswrapper[4682]: I1210 10:47:19.381188 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:19 crc kubenswrapper[4682]: E1210 10:47:19.381998 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:20 crc kubenswrapper[4682]: I1210 10:47:20.382215 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:20 crc kubenswrapper[4682]: E1210 10:47:20.382374 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:20 crc kubenswrapper[4682]: E1210 10:47:20.426667 4682 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Dec 10 10:47:20 crc kubenswrapper[4682]: E1210 10:47:20.659638 4682 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:47:21 crc kubenswrapper[4682]: I1210 10:47:21.380376 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:21 crc kubenswrapper[4682]: I1210 10:47:21.380582 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:21 crc kubenswrapper[4682]: I1210 10:47:21.380686 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:21 crc kubenswrapper[4682]: E1210 10:47:21.380619 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:21 crc kubenswrapper[4682]: E1210 10:47:21.380812 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:21 crc kubenswrapper[4682]: E1210 10:47:21.380976 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:22 crc kubenswrapper[4682]: I1210 10:47:22.380388 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:22 crc kubenswrapper[4682]: E1210 10:47:22.380665 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:22 crc kubenswrapper[4682]: I1210 10:47:22.381536 4682 scope.go:117] "RemoveContainer" containerID="6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9" Dec 10 10:47:22 crc kubenswrapper[4682]: E1210 10:47:22.381761 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-vmhkf_openshift-ovn-kubernetes(0d4402e6-a6f6-4970-8392-9f1856b52eb4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" Dec 10 10:47:23 crc kubenswrapper[4682]: I1210 10:47:23.380517 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:23 crc kubenswrapper[4682]: I1210 10:47:23.380517 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:23 crc kubenswrapper[4682]: E1210 10:47:23.380680 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:23 crc kubenswrapper[4682]: I1210 10:47:23.380518 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:23 crc kubenswrapper[4682]: E1210 10:47:23.381080 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:23 crc kubenswrapper[4682]: E1210 10:47:23.380998 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:24 crc kubenswrapper[4682]: I1210 10:47:24.381418 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:24 crc kubenswrapper[4682]: E1210 10:47:24.381564 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:25 crc kubenswrapper[4682]: I1210 10:47:25.380819 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:25 crc kubenswrapper[4682]: I1210 10:47:25.380917 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:25 crc kubenswrapper[4682]: I1210 10:47:25.381072 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:25 crc kubenswrapper[4682]: E1210 10:47:25.381564 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:25 crc kubenswrapper[4682]: E1210 10:47:25.382113 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:25 crc kubenswrapper[4682]: E1210 10:47:25.382191 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:25 crc kubenswrapper[4682]: E1210 10:47:25.661234 4682 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:47:26 crc kubenswrapper[4682]: I1210 10:47:26.380973 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:26 crc kubenswrapper[4682]: E1210 10:47:26.381155 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:27 crc kubenswrapper[4682]: I1210 10:47:27.380871 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:27 crc kubenswrapper[4682]: E1210 10:47:27.381039 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:27 crc kubenswrapper[4682]: I1210 10:47:27.380906 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:27 crc kubenswrapper[4682]: E1210 10:47:27.381120 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:27 crc kubenswrapper[4682]: I1210 10:47:27.381617 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:27 crc kubenswrapper[4682]: E1210 10:47:27.381739 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:28 crc kubenswrapper[4682]: I1210 10:47:28.379988 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:28 crc kubenswrapper[4682]: E1210 10:47:28.380219 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:29 crc kubenswrapper[4682]: I1210 10:47:29.380553 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:29 crc kubenswrapper[4682]: I1210 10:47:29.380559 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:29 crc kubenswrapper[4682]: E1210 10:47:29.381091 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:29 crc kubenswrapper[4682]: E1210 10:47:29.381172 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:29 crc kubenswrapper[4682]: I1210 10:47:29.380636 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:29 crc kubenswrapper[4682]: E1210 10:47:29.381311 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:30 crc kubenswrapper[4682]: I1210 10:47:30.381702 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:30 crc kubenswrapper[4682]: E1210 10:47:30.381848 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:30 crc kubenswrapper[4682]: I1210 10:47:30.381909 4682 scope.go:117] "RemoveContainer" containerID="a7b979e9cc3b0e9077533cb434014c582b24756abb4f4b3a178ac7be985512fd" Dec 10 10:47:30 crc kubenswrapper[4682]: E1210 10:47:30.661789 4682 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:47:31 crc kubenswrapper[4682]: I1210 10:47:31.033861 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zs6ss_a005c959-3805-4e15-aa3a-7093815e03b8/kube-multus/1.log" Dec 10 10:47:31 crc kubenswrapper[4682]: I1210 10:47:31.033913 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zs6ss" event={"ID":"a005c959-3805-4e15-aa3a-7093815e03b8","Type":"ContainerStarted","Data":"c06143a1c59cfb88d374e761a7c11462e0b30d2649b518183753eba214aa6465"} Dec 10 10:47:31 crc kubenswrapper[4682]: I1210 10:47:31.380267 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:31 crc kubenswrapper[4682]: I1210 10:47:31.380344 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:31 crc kubenswrapper[4682]: E1210 10:47:31.380385 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:31 crc kubenswrapper[4682]: I1210 10:47:31.380344 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:31 crc kubenswrapper[4682]: E1210 10:47:31.380603 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:31 crc kubenswrapper[4682]: E1210 10:47:31.380538 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:32 crc kubenswrapper[4682]: I1210 10:47:32.380426 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:32 crc kubenswrapper[4682]: E1210 10:47:32.380645 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:33 crc kubenswrapper[4682]: I1210 10:47:33.380162 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:33 crc kubenswrapper[4682]: E1210 10:47:33.380680 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:33 crc kubenswrapper[4682]: I1210 10:47:33.381275 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:33 crc kubenswrapper[4682]: E1210 10:47:33.381430 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:33 crc kubenswrapper[4682]: I1210 10:47:33.381674 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:33 crc kubenswrapper[4682]: E1210 10:47:33.381818 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:34 crc kubenswrapper[4682]: I1210 10:47:34.380902 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:34 crc kubenswrapper[4682]: E1210 10:47:34.381038 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:35 crc kubenswrapper[4682]: I1210 10:47:35.380950 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:35 crc kubenswrapper[4682]: I1210 10:47:35.381026 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:35 crc kubenswrapper[4682]: I1210 10:47:35.380962 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:35 crc kubenswrapper[4682]: E1210 10:47:35.381102 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:35 crc kubenswrapper[4682]: E1210 10:47:35.381435 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:35 crc kubenswrapper[4682]: E1210 10:47:35.381699 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:35 crc kubenswrapper[4682]: I1210 10:47:35.381881 4682 scope.go:117] "RemoveContainer" containerID="6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9" Dec 10 10:47:35 crc kubenswrapper[4682]: E1210 10:47:35.663664 4682 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Dec 10 10:47:36 crc kubenswrapper[4682]: I1210 10:47:36.055829 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/3.log" Dec 10 10:47:36 crc kubenswrapper[4682]: I1210 10:47:36.058431 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerStarted","Data":"fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290"} Dec 10 10:47:36 crc kubenswrapper[4682]: I1210 10:47:36.058938 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:47:36 crc kubenswrapper[4682]: I1210 10:47:36.086242 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podStartSLOduration=115.086222316 podStartE2EDuration="1m55.086222316s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:36.085208929 +0000 UTC m=+136.405419699" watchObservedRunningTime="2025-12-10 10:47:36.086222316 +0000 UTC m=+136.406433066" Dec 10 10:47:36 crc kubenswrapper[4682]: I1210 10:47:36.169188 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-6c5qg"] Dec 10 10:47:36 crc kubenswrapper[4682]: I1210 10:47:36.169927 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:36 crc kubenswrapper[4682]: E1210 10:47:36.170066 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:36 crc kubenswrapper[4682]: I1210 10:47:36.380784 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:36 crc kubenswrapper[4682]: E1210 10:47:36.380917 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:37 crc kubenswrapper[4682]: I1210 10:47:37.380170 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:37 crc kubenswrapper[4682]: I1210 10:47:37.380170 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:37 crc kubenswrapper[4682]: E1210 10:47:37.380357 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:37 crc kubenswrapper[4682]: E1210 10:47:37.380431 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:38 crc kubenswrapper[4682]: I1210 10:47:38.380405 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:38 crc kubenswrapper[4682]: E1210 10:47:38.380599 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:38 crc kubenswrapper[4682]: I1210 10:47:38.380752 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:38 crc kubenswrapper[4682]: E1210 10:47:38.380913 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:39 crc kubenswrapper[4682]: I1210 10:47:39.380914 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:39 crc kubenswrapper[4682]: I1210 10:47:39.380968 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:39 crc kubenswrapper[4682]: E1210 10:47:39.381997 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 10:47:39 crc kubenswrapper[4682]: E1210 10:47:39.382242 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 10:47:40 crc kubenswrapper[4682]: I1210 10:47:40.380394 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:40 crc kubenswrapper[4682]: E1210 10:47:40.381387 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6c5qg" podUID="f308e36d-4856-4306-adec-390e40daaee3" Dec 10 10:47:40 crc kubenswrapper[4682]: I1210 10:47:40.381592 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:40 crc kubenswrapper[4682]: E1210 10:47:40.381845 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 10:47:41 crc kubenswrapper[4682]: I1210 10:47:41.380508 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:41 crc kubenswrapper[4682]: I1210 10:47:41.380832 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:41 crc kubenswrapper[4682]: I1210 10:47:41.383016 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 10 10:47:41 crc kubenswrapper[4682]: I1210 10:47:41.383212 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 10 10:47:42 crc kubenswrapper[4682]: I1210 10:47:42.380645 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:42 crc kubenswrapper[4682]: I1210 10:47:42.380701 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:47:42 crc kubenswrapper[4682]: I1210 10:47:42.385036 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 10 10:47:42 crc kubenswrapper[4682]: I1210 10:47:42.385036 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 10 10:47:42 crc kubenswrapper[4682]: I1210 10:47:42.385293 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 10 10:47:42 crc kubenswrapper[4682]: I1210 10:47:42.385665 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Dec 10 10:47:47 crc kubenswrapper[4682]: I1210 10:47:47.357969 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:47 crc kubenswrapper[4682]: E1210 10:47:47.358367 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:49:49.358345732 +0000 UTC m=+269.678556492 (durationBeforeRetry 2m2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:47 crc kubenswrapper[4682]: I1210 10:47:47.358592 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:47 crc kubenswrapper[4682]: I1210 10:47:47.359388 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:47 crc kubenswrapper[4682]: I1210 10:47:47.460580 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:47 crc kubenswrapper[4682]: I1210 10:47:47.460642 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: 
\"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:47 crc kubenswrapper[4682]: I1210 10:47:47.460673 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:47 crc kubenswrapper[4682]: I1210 10:47:47.467397 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:47 crc kubenswrapper[4682]: I1210 10:47:47.468009 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:47 crc kubenswrapper[4682]: I1210 10:47:47.468571 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:47 crc kubenswrapper[4682]: I1210 10:47:47.499154 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 10:47:47 crc kubenswrapper[4682]: I1210 10:47:47.697969 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 10:47:47 crc kubenswrapper[4682]: I1210 10:47:47.707544 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.104011 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"32525fd22e9276ebdfe29889123d0a5c3431be244359a41b077e620e94d82b94"} Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.105170 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"f11e6b951bb07b2db0d7f230d8a5da6f7417dc853ad469e753a6e38be08abbf2"} Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.106281 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"d4ae939861eda615df2bc02368b2ca83dabfd5abf64e31a7acf35188e2d5c455"} Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.106311 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"abf240ada94209853b2afd610b2ca20b8ad85a2aa3463696ba825bca23b3ec99"} Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.441082 4682 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.485573 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ccs9l"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.486404 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.486823 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.486874 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-dqndv"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.487672 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.487940 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.488866 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4zh9p"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.489228 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.490823 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.491203 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.491615 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.491870 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.492038 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.492176 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.492427 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.492575 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.492682 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.492802 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.493068 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.493184 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.493318 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.493686 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.493795 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.493996 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.494104 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.494210 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.494359 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 10 10:47:48 crc 
kubenswrapper[4682]: I1210 10:47:48.494581 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.495816 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.496154 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.496511 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.496638 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.497218 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.501774 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.502252 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.505200 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.505255 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.505200 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.505430 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.506381 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.509257 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.509822 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.513236 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.513346 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.513384 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.518911 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.519899 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.520355 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.520516 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.521672 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.522142 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.525046 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.525645 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.526337 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.526830 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.527335 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.527518 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.539867 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.542324 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.542447 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-wxvt5"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.542929 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.546487 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.547027 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.548508 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.553684 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.555547 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-mpnmc"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.556031 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.556038 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.556624 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.557185 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-vl6t7"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.557668 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.557754 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.558198 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.558728 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.558745 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.558939 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.559064 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.559182 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.559288 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.559427 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.559485 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-ftd94"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.559549 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.559848 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.560057 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.560231 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.561710 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-jfqfn"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.562041 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.563892 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.570840 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-5nt7b"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.571639 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-5nt7b" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.571915 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-ghhnn"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.572026 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.572197 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.572744 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.573337 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.573637 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.575564 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.600581 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.601040 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.602462 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.602695 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.602867 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.603706 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.603850 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.604103 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.604232 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.604358 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.610554 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.618876 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.619174 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.619700 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.619833 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.620400 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.620541 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.620682 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.621775 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.621926 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622041 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622108 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622173 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622255 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622276 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622348 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622359 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622414 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622461 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622352 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622051 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622426 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622125 4682 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ingress"/"router-certs-default" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622429 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622713 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622746 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622786 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622885 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622912 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622918 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.622981 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.623006 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.623027 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.623114 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.623644 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.624541 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.626078 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.648523 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vv5lr"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.648819 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.649130 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.649231 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vv5lr" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.652209 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.653785 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.653945 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.657874 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.658499 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-dbszw"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.658705 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.659023 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.659176 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.659452 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.661558 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.662272 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gw5v5"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.662873 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.665579 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-68w64"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.666234 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-68w64" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.667052 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-7xtlk"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.667938 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-7xtlk" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.668548 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-pmblg"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.668941 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.669340 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-pmblg" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.669626 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.670046 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.671348 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.672365 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673213 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a7269eba-82ff-4387-a35a-767850aa52d7-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673267 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d94b4cb1-bb7b-41c0-9670-654ba1336909-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jc5g5\" (UID: \"d94b4cb1-bb7b-41c0-9670-654ba1336909\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673297 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-client-ca\") pod \"route-controller-manager-6576b87f9c-mp9vx\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673329 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/727e26ec-b579-4b62-846e-c626fbf44f20-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-j2cmz\" (UID: \"727e26ec-b579-4b62-846e-c626fbf44f20\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673363 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/46274096-898e-4f5e-9765-7f4058e4e5af-serving-cert\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673395 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gt5w\" (UniqueName: \"kubernetes.io/projected/fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e-kube-api-access-7gt5w\") pod \"cluster-samples-operator-665b6dd947-2v9xt\" (UID: \"fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673441 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/660474bf-d4be-49dc-b993-5cd3161cb575-console-oauth-config\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673546 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-node-pullsecrets\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673583 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46274096-898e-4f5e-9765-7f4058e4e5af-config\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673634 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/727e26ec-b579-4b62-846e-c626fbf44f20-config\") pod \"kube-controller-manager-operator-78b949d7b-j2cmz\" (UID: \"727e26ec-b579-4b62-846e-c626fbf44f20\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673673 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-audit-dir\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673703 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f8743aa-53f3-40d0-8af1-3daaae9404c4-serving-cert\") pod \"console-operator-58897d9998-vl6t7\" (UID: \"6f8743aa-53f3-40d0-8af1-3daaae9404c4\") " pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673730 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/26599783-6b54-49f1-885a-3e87257c7063-srv-cert\") pod \"catalog-operator-68c6474976-gcpj6\" (UID: 
\"26599783-6b54-49f1-885a-3e87257c7063\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673764 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a40a32f4-3f8b-4397-a193-536f81131064-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-5nt7b\" (UID: \"a40a32f4-3f8b-4397-a193-536f81131064\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5nt7b" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673797 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-encryption-config\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673826 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kk86b\" (UniqueName: \"kubernetes.io/projected/6f8743aa-53f3-40d0-8af1-3daaae9404c4-kube-api-access-kk86b\") pod \"console-operator-58897d9998-vl6t7\" (UID: \"6f8743aa-53f3-40d0-8af1-3daaae9404c4\") " pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673873 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-trusted-ca-bundle\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.673895 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-image-import-ca\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674110 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82z7l\" (UniqueName: \"kubernetes.io/projected/51fb452a-e943-4222-a52b-dbdc0f378760-kube-api-access-82z7l\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674126 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/8696312f-d81d-442b-b80c-6938db27e66b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-dqndv\" (UID: \"8696312f-d81d-442b-b80c-6938db27e66b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674147 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bv6lc\" (UniqueName: \"kubernetes.io/projected/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-kube-api-access-bv6lc\") pod \"route-controller-manager-6576b87f9c-mp9vx\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674167 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-console-config\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674182 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp4qm\" (UniqueName: \"kubernetes.io/projected/660474bf-d4be-49dc-b993-5cd3161cb575-kube-api-access-tp4qm\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674200 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-2v9xt\" (UID: \"fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674220 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/2b0c1536-0797-49f3-8f0f-de2bb4760a6b-machine-approver-tls\") pod \"machine-approver-56656f9798-lfvph\" (UID: \"2b0c1536-0797-49f3-8f0f-de2bb4760a6b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674235 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7269eba-82ff-4387-a35a-767850aa52d7-config\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674258 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hg4v5\" (UniqueName: \"kubernetes.io/projected/9e08710b-39f2-4458-82c7-7c4cd8978787-kube-api-access-hg4v5\") pod \"machine-config-operator-74547568cd-xpg4s\" (UID: \"9e08710b-39f2-4458-82c7-7c4cd8978787\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674273 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/26599783-6b54-49f1-885a-3e87257c7063-profile-collector-cert\") pod \"catalog-operator-68c6474976-gcpj6\" (UID: \"26599783-6b54-49f1-885a-3e87257c7063\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674292 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5c5bbff-cf34-40eb-b319-3b863d1e7776-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-x84jm\" (UID: 
\"b5c5bbff-cf34-40eb-b319-3b863d1e7776\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674309 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2b0c1536-0797-49f3-8f0f-de2bb4760a6b-auth-proxy-config\") pod \"machine-approver-56656f9798-lfvph\" (UID: \"2b0c1536-0797-49f3-8f0f-de2bb4760a6b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674330 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmxr7\" (UniqueName: \"kubernetes.io/projected/a40a32f4-3f8b-4397-a193-536f81131064-kube-api-access-fmxr7\") pod \"multus-admission-controller-857f4d67dd-5nt7b\" (UID: \"a40a32f4-3f8b-4397-a193-536f81131064\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5nt7b" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674348 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b0c1536-0797-49f3-8f0f-de2bb4760a6b-config\") pod \"machine-approver-56656f9798-lfvph\" (UID: \"2b0c1536-0797-49f3-8f0f-de2bb4760a6b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674364 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/687946c8-cb4f-4db3-85ed-31606d7a3e39-proxy-tls\") pod \"machine-config-controller-84d6567774-jdqwb\" (UID: \"687946c8-cb4f-4db3-85ed-31606d7a3e39\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674381 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a7269eba-82ff-4387-a35a-767850aa52d7-serving-cert\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674401 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9e08710b-39f2-4458-82c7-7c4cd8978787-images\") pod \"machine-config-operator-74547568cd-xpg4s\" (UID: \"9e08710b-39f2-4458-82c7-7c4cd8978787\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674419 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/46274096-898e-4f5e-9765-7f4058e4e5af-etcd-service-ca\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674434 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d94b4cb1-bb7b-41c0-9670-654ba1336909-serving-cert\") pod 
\"openshift-controller-manager-operator-756b6f6bc6-jc5g5\" (UID: \"d94b4cb1-bb7b-41c0-9670-654ba1336909\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674451 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-serving-cert\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674482 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674501 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/51fb452a-e943-4222-a52b-dbdc0f378760-service-ca-bundle\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674515 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fh27v\" (UniqueName: \"kubernetes.io/projected/8696312f-d81d-442b-b80c-6938db27e66b-kube-api-access-fh27v\") pod \"machine-api-operator-5694c8668f-dqndv\" (UID: \"8696312f-d81d-442b-b80c-6938db27e66b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674531 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5c5bbff-cf34-40eb-b319-3b863d1e7776-config\") pod \"openshift-apiserver-operator-796bbdcf4f-x84jm\" (UID: \"b5c5bbff-cf34-40eb-b319-3b863d1e7776\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674558 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-etcd-client\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674572 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtcw8\" (UniqueName: \"kubernetes.io/projected/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-kube-api-access-jtcw8\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674588 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5rqd\" (UniqueName: \"kubernetes.io/projected/b5c5bbff-cf34-40eb-b319-3b863d1e7776-kube-api-access-j5rqd\") pod \"openshift-apiserver-operator-796bbdcf4f-x84jm\" (UID: 
\"b5c5bbff-cf34-40eb-b319-3b863d1e7776\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674603 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a7269eba-82ff-4387-a35a-767850aa52d7-service-ca-bundle\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674620 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9e08710b-39f2-4458-82c7-7c4cd8978787-proxy-tls\") pod \"machine-config-operator-74547568cd-xpg4s\" (UID: \"9e08710b-39f2-4458-82c7-7c4cd8978787\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674639 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-config\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674655 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f8743aa-53f3-40d0-8af1-3daaae9404c4-config\") pod \"console-operator-58897d9998-vl6t7\" (UID: \"6f8743aa-53f3-40d0-8af1-3daaae9404c4\") " pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674679 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/687946c8-cb4f-4db3-85ed-31606d7a3e39-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-jdqwb\" (UID: \"687946c8-cb4f-4db3-85ed-31606d7a3e39\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674696 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnlp9\" (UniqueName: \"kubernetes.io/projected/26599783-6b54-49f1-885a-3e87257c7063-kube-api-access-cnlp9\") pod \"catalog-operator-68c6474976-gcpj6\" (UID: \"26599783-6b54-49f1-885a-3e87257c7063\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674710 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9e08710b-39f2-4458-82c7-7c4cd8978787-auth-proxy-config\") pod \"machine-config-operator-74547568cd-xpg4s\" (UID: \"9e08710b-39f2-4458-82c7-7c4cd8978787\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674725 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/727e26ec-b579-4b62-846e-c626fbf44f20-serving-cert\") pod 
\"kube-controller-manager-operator-78b949d7b-j2cmz\" (UID: \"727e26ec-b579-4b62-846e-c626fbf44f20\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674742 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/660474bf-d4be-49dc-b993-5cd3161cb575-console-serving-cert\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674756 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-audit\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674774 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8696312f-d81d-442b-b80c-6938db27e66b-images\") pod \"machine-api-operator-5694c8668f-dqndv\" (UID: \"8696312f-d81d-442b-b80c-6938db27e66b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674799 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/46274096-898e-4f5e-9765-7f4058e4e5af-etcd-ca\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674813 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7frv\" (UniqueName: \"kubernetes.io/projected/a7269eba-82ff-4387-a35a-767850aa52d7-kube-api-access-g7frv\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674829 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8696312f-d81d-442b-b80c-6938db27e66b-config\") pod \"machine-api-operator-5694c8668f-dqndv\" (UID: \"8696312f-d81d-442b-b80c-6938db27e66b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674844 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnflj\" (UniqueName: \"kubernetes.io/projected/687946c8-cb4f-4db3-85ed-31606d7a3e39-kube-api-access-mnflj\") pod \"machine-config-controller-84d6567774-jdqwb\" (UID: \"687946c8-cb4f-4db3-85ed-31606d7a3e39\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674862 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/46274096-898e-4f5e-9765-7f4058e4e5af-etcd-client\") pod \"etcd-operator-b45778765-wxvt5\" (UID: 
\"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674876 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmjtk\" (UniqueName: \"kubernetes.io/projected/46274096-898e-4f5e-9765-7f4058e4e5af-kube-api-access-gmjtk\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674889 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-service-ca\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674902 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-etcd-serving-ca\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674916 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f8743aa-53f3-40d0-8af1-3daaae9404c4-trusted-ca\") pod \"console-operator-58897d9998-vl6t7\" (UID: \"6f8743aa-53f3-40d0-8af1-3daaae9404c4\") " pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674931 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rx2t\" (UniqueName: \"kubernetes.io/projected/d94b4cb1-bb7b-41c0-9670-654ba1336909-kube-api-access-5rx2t\") pod \"openshift-controller-manager-operator-756b6f6bc6-jc5g5\" (UID: \"d94b4cb1-bb7b-41c0-9670-654ba1336909\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.674947 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-config\") pod \"route-controller-manager-6576b87f9c-mp9vx\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.675136 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/51fb452a-e943-4222-a52b-dbdc0f378760-stats-auth\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.675154 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/51fb452a-e943-4222-a52b-dbdc0f378760-default-certificate\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " 
pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.675170 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-oauth-serving-cert\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.675185 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4678\" (UniqueName: \"kubernetes.io/projected/2b0c1536-0797-49f3-8f0f-de2bb4760a6b-kube-api-access-q4678\") pod \"machine-approver-56656f9798-lfvph\" (UID: \"2b0c1536-0797-49f3-8f0f-de2bb4760a6b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.675202 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-serving-cert\") pod \"route-controller-manager-6576b87f9c-mp9vx\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.675217 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/51fb452a-e943-4222-a52b-dbdc0f378760-metrics-certs\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.675935 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.676280 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.678600 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.679093 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.681391 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9gwb2"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.688850 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.695872 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-t9w8x"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.695964 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9gwb2" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.707871 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.709101 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-dqndv"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.709124 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ccs9l"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.709141 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4zh9p"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.709151 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.710035 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.710061 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.710073 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.710086 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-ftd94"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.710191 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.710544 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.710774 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.712725 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.714372 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-wxvt5"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.716614 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.723032 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-gs4k5"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.723803 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-gs4k5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.726704 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-mpnmc"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.728601 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.729801 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.733934 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-pmblg"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.735157 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-68w64"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.736718 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.738412 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-5nt7b"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.739814 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.740965 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-dbszw"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.742592 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-vl6t7"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.745596 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.751940 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.753300 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.754866 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.757025 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.757996 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.759319 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-ghhnn"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.760038 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-console/downloads-7954f5f757-7xtlk"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.761226 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.762368 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gw5v5"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.763626 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vv5lr"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.765533 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.766199 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9gwb2"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.767250 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-ghbxg"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.768038 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-ghbxg" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.768696 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.769668 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-ncfz6"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.770389 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-ncfz6" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.770694 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.771763 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.772843 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-ncfz6"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.773989 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.775184 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.775694 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-etcd-client\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.775728 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtcw8\" (UniqueName: \"kubernetes.io/projected/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-kube-api-access-jtcw8\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.775751 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5rqd\" (UniqueName: \"kubernetes.io/projected/b5c5bbff-cf34-40eb-b319-3b863d1e7776-kube-api-access-j5rqd\") pod \"openshift-apiserver-operator-796bbdcf4f-x84jm\" (UID: \"b5c5bbff-cf34-40eb-b319-3b863d1e7776\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.775769 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a7269eba-82ff-4387-a35a-767850aa52d7-service-ca-bundle\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.775786 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9e08710b-39f2-4458-82c7-7c4cd8978787-proxy-tls\") pod \"machine-config-operator-74547568cd-xpg4s\" (UID: \"9e08710b-39f2-4458-82c7-7c4cd8978787\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.775802 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-config\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc 
kubenswrapper[4682]: I1210 10:47:48.775818 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f8743aa-53f3-40d0-8af1-3daaae9404c4-config\") pod \"console-operator-58897d9998-vl6t7\" (UID: \"6f8743aa-53f3-40d0-8af1-3daaae9404c4\") " pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.775833 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/687946c8-cb4f-4db3-85ed-31606d7a3e39-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-jdqwb\" (UID: \"687946c8-cb4f-4db3-85ed-31606d7a3e39\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.776608 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnlp9\" (UniqueName: \"kubernetes.io/projected/26599783-6b54-49f1-885a-3e87257c7063-kube-api-access-cnlp9\") pod \"catalog-operator-68c6474976-gcpj6\" (UID: \"26599783-6b54-49f1-885a-3e87257c7063\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.776635 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9e08710b-39f2-4458-82c7-7c4cd8978787-auth-proxy-config\") pod \"machine-config-operator-74547568cd-xpg4s\" (UID: \"9e08710b-39f2-4458-82c7-7c4cd8978787\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.776653 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/727e26ec-b579-4b62-846e-c626fbf44f20-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-j2cmz\" (UID: \"727e26ec-b579-4b62-846e-c626fbf44f20\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.776667 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-audit\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.776743 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a7269eba-82ff-4387-a35a-767850aa52d7-service-ca-bundle\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.776742 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/687946c8-cb4f-4db3-85ed-31606d7a3e39-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-jdqwb\" (UID: \"687946c8-cb4f-4db3-85ed-31606d7a3e39\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.776576 4682 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-config\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.776986 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f8743aa-53f3-40d0-8af1-3daaae9404c4-config\") pod \"console-operator-58897d9998-vl6t7\" (UID: \"6f8743aa-53f3-40d0-8af1-3daaae9404c4\") " pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777237 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9e08710b-39f2-4458-82c7-7c4cd8978787-auth-proxy-config\") pod \"machine-config-operator-74547568cd-xpg4s\" (UID: \"9e08710b-39f2-4458-82c7-7c4cd8978787\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777431 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/660474bf-d4be-49dc-b993-5cd3161cb575-console-serving-cert\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777486 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8696312f-d81d-442b-b80c-6938db27e66b-images\") pod \"machine-api-operator-5694c8668f-dqndv\" (UID: \"8696312f-d81d-442b-b80c-6938db27e66b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777532 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/46274096-898e-4f5e-9765-7f4058e4e5af-etcd-ca\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777560 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7frv\" (UniqueName: \"kubernetes.io/projected/a7269eba-82ff-4387-a35a-767850aa52d7-kube-api-access-g7frv\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777590 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8696312f-d81d-442b-b80c-6938db27e66b-config\") pod \"machine-api-operator-5694c8668f-dqndv\" (UID: \"8696312f-d81d-442b-b80c-6938db27e66b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777614 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnflj\" (UniqueName: \"kubernetes.io/projected/687946c8-cb4f-4db3-85ed-31606d7a3e39-kube-api-access-mnflj\") pod \"machine-config-controller-84d6567774-jdqwb\" (UID: \"687946c8-cb4f-4db3-85ed-31606d7a3e39\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777635 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-etcd-serving-ca\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777656 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f8743aa-53f3-40d0-8af1-3daaae9404c4-trusted-ca\") pod \"console-operator-58897d9998-vl6t7\" (UID: \"6f8743aa-53f3-40d0-8af1-3daaae9404c4\") " pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777682 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rx2t\" (UniqueName: \"kubernetes.io/projected/d94b4cb1-bb7b-41c0-9670-654ba1336909-kube-api-access-5rx2t\") pod \"openshift-controller-manager-operator-756b6f6bc6-jc5g5\" (UID: \"d94b4cb1-bb7b-41c0-9670-654ba1336909\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777705 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/46274096-898e-4f5e-9765-7f4058e4e5af-etcd-client\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777726 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmjtk\" (UniqueName: \"kubernetes.io/projected/46274096-898e-4f5e-9765-7f4058e4e5af-kube-api-access-gmjtk\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777746 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-audit\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777749 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-service-ca\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777791 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-config\") pod \"route-controller-manager-6576b87f9c-mp9vx\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777810 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: 
\"kubernetes.io/secret/51fb452a-e943-4222-a52b-dbdc0f378760-stats-auth\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777828 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/51fb452a-e943-4222-a52b-dbdc0f378760-default-certificate\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777847 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4678\" (UniqueName: \"kubernetes.io/projected/2b0c1536-0797-49f3-8f0f-de2bb4760a6b-kube-api-access-q4678\") pod \"machine-approver-56656f9798-lfvph\" (UID: \"2b0c1536-0797-49f3-8f0f-de2bb4760a6b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777865 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-oauth-serving-cert\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777882 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-serving-cert\") pod \"route-controller-manager-6576b87f9c-mp9vx\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777897 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/51fb452a-e943-4222-a52b-dbdc0f378760-metrics-certs\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777915 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a7269eba-82ff-4387-a35a-767850aa52d7-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777933 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d94b4cb1-bb7b-41c0-9670-654ba1336909-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jc5g5\" (UID: \"d94b4cb1-bb7b-41c0-9670-654ba1336909\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777951 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/660474bf-d4be-49dc-b993-5cd3161cb575-console-oauth-config\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " 
pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777968 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-node-pullsecrets\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777983 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-client-ca\") pod \"route-controller-manager-6576b87f9c-mp9vx\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.777999 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/727e26ec-b579-4b62-846e-c626fbf44f20-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-j2cmz\" (UID: \"727e26ec-b579-4b62-846e-c626fbf44f20\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778015 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46274096-898e-4f5e-9765-7f4058e4e5af-serving-cert\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778031 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gt5w\" (UniqueName: \"kubernetes.io/projected/fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e-kube-api-access-7gt5w\") pod \"cluster-samples-operator-665b6dd947-2v9xt\" (UID: \"fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778047 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46274096-898e-4f5e-9765-7f4058e4e5af-config\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778066 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-audit-dir\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778079 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f8743aa-53f3-40d0-8af1-3daaae9404c4-serving-cert\") pod \"console-operator-58897d9998-vl6t7\" (UID: \"6f8743aa-53f3-40d0-8af1-3daaae9404c4\") " pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778094 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/26599783-6b54-49f1-885a-3e87257c7063-srv-cert\") pod \"catalog-operator-68c6474976-gcpj6\" (UID: \"26599783-6b54-49f1-885a-3e87257c7063\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778110 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/727e26ec-b579-4b62-846e-c626fbf44f20-config\") pod \"kube-controller-manager-operator-78b949d7b-j2cmz\" (UID: \"727e26ec-b579-4b62-846e-c626fbf44f20\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778128 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a40a32f4-3f8b-4397-a193-536f81131064-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-5nt7b\" (UID: \"a40a32f4-3f8b-4397-a193-536f81131064\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5nt7b" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778143 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-encryption-config\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778157 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kk86b\" (UniqueName: \"kubernetes.io/projected/6f8743aa-53f3-40d0-8af1-3daaae9404c4-kube-api-access-kk86b\") pod \"console-operator-58897d9998-vl6t7\" (UID: \"6f8743aa-53f3-40d0-8af1-3daaae9404c4\") " pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778172 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-trusted-ca-bundle\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778189 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-image-import-ca\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778205 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82z7l\" (UniqueName: \"kubernetes.io/projected/51fb452a-e943-4222-a52b-dbdc0f378760-kube-api-access-82z7l\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778222 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/8696312f-d81d-442b-b80c-6938db27e66b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-dqndv\" (UID: \"8696312f-d81d-442b-b80c-6938db27e66b\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778239 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-console-config\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778254 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp4qm\" (UniqueName: \"kubernetes.io/projected/660474bf-d4be-49dc-b993-5cd3161cb575-kube-api-access-tp4qm\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778258 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/46274096-898e-4f5e-9765-7f4058e4e5af-etcd-ca\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778272 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bv6lc\" (UniqueName: \"kubernetes.io/projected/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-kube-api-access-bv6lc\") pod \"route-controller-manager-6576b87f9c-mp9vx\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778289 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-2v9xt\" (UID: \"fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778308 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/2b0c1536-0797-49f3-8f0f-de2bb4760a6b-machine-approver-tls\") pod \"machine-approver-56656f9798-lfvph\" (UID: \"2b0c1536-0797-49f3-8f0f-de2bb4760a6b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778323 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7269eba-82ff-4387-a35a-767850aa52d7-config\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778333 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-t9w8x"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778339 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hg4v5\" (UniqueName: \"kubernetes.io/projected/9e08710b-39f2-4458-82c7-7c4cd8978787-kube-api-access-hg4v5\") pod \"machine-config-operator-74547568cd-xpg4s\" (UID: \"9e08710b-39f2-4458-82c7-7c4cd8978787\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778394 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/26599783-6b54-49f1-885a-3e87257c7063-profile-collector-cert\") pod \"catalog-operator-68c6474976-gcpj6\" (UID: \"26599783-6b54-49f1-885a-3e87257c7063\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778401 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8696312f-d81d-442b-b80c-6938db27e66b-images\") pod \"machine-api-operator-5694c8668f-dqndv\" (UID: \"8696312f-d81d-442b-b80c-6938db27e66b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778425 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5c5bbff-cf34-40eb-b319-3b863d1e7776-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-x84jm\" (UID: \"b5c5bbff-cf34-40eb-b319-3b863d1e7776\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778452 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2b0c1536-0797-49f3-8f0f-de2bb4760a6b-auth-proxy-config\") pod \"machine-approver-56656f9798-lfvph\" (UID: \"2b0c1536-0797-49f3-8f0f-de2bb4760a6b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778491 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/687946c8-cb4f-4db3-85ed-31606d7a3e39-proxy-tls\") pod \"machine-config-controller-84d6567774-jdqwb\" (UID: \"687946c8-cb4f-4db3-85ed-31606d7a3e39\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778514 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a7269eba-82ff-4387-a35a-767850aa52d7-serving-cert\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778538 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmxr7\" (UniqueName: \"kubernetes.io/projected/a40a32f4-3f8b-4397-a193-536f81131064-kube-api-access-fmxr7\") pod \"multus-admission-controller-857f4d67dd-5nt7b\" (UID: \"a40a32f4-3f8b-4397-a193-536f81131064\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5nt7b" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778561 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b0c1536-0797-49f3-8f0f-de2bb4760a6b-config\") pod \"machine-approver-56656f9798-lfvph\" (UID: \"2b0c1536-0797-49f3-8f0f-de2bb4760a6b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778584 
4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9e08710b-39f2-4458-82c7-7c4cd8978787-images\") pod \"machine-config-operator-74547568cd-xpg4s\" (UID: \"9e08710b-39f2-4458-82c7-7c4cd8978787\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778604 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/46274096-898e-4f5e-9765-7f4058e4e5af-etcd-service-ca\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778625 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d94b4cb1-bb7b-41c0-9670-654ba1336909-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-jc5g5\" (UID: \"d94b4cb1-bb7b-41c0-9670-654ba1336909\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778680 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-serving-cert\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778687 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-service-ca\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778702 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778745 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-node-pullsecrets\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778757 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/51fb452a-e943-4222-a52b-dbdc0f378760-service-ca-bundle\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778802 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fh27v\" (UniqueName: \"kubernetes.io/projected/8696312f-d81d-442b-b80c-6938db27e66b-kube-api-access-fh27v\") pod \"machine-api-operator-5694c8668f-dqndv\" (UID: \"8696312f-d81d-442b-b80c-6938db27e66b\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778833 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5c5bbff-cf34-40eb-b319-3b863d1e7776-config\") pod \"openshift-apiserver-operator-796bbdcf4f-x84jm\" (UID: \"b5c5bbff-cf34-40eb-b319-3b863d1e7776\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778954 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46274096-898e-4f5e-9765-7f4058e4e5af-config\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.778998 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-audit-dir\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.779367 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a7269eba-82ff-4387-a35a-767850aa52d7-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.779665 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.779686 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5c5bbff-cf34-40eb-b319-3b863d1e7776-config\") pod \"openshift-apiserver-operator-796bbdcf4f-x84jm\" (UID: \"b5c5bbff-cf34-40eb-b319-3b863d1e7776\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.779753 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-config\") pod \"route-controller-manager-6576b87f9c-mp9vx\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.779921 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/51fb452a-e943-4222-a52b-dbdc0f378760-service-ca-bundle\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.780536 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-client-ca\") 
pod \"route-controller-manager-6576b87f9c-mp9vx\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.780923 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8696312f-d81d-442b-b80c-6938db27e66b-config\") pod \"machine-api-operator-5694c8668f-dqndv\" (UID: \"8696312f-d81d-442b-b80c-6938db27e66b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.781278 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/727e26ec-b579-4b62-846e-c626fbf44f20-config\") pod \"kube-controller-manager-operator-78b949d7b-j2cmz\" (UID: \"727e26ec-b579-4b62-846e-c626fbf44f20\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.781679 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-oauth-serving-cert\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.782165 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-console-config\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.782277 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b0c1536-0797-49f3-8f0f-de2bb4760a6b-config\") pod \"machine-approver-56656f9798-lfvph\" (UID: \"2b0c1536-0797-49f3-8f0f-de2bb4760a6b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.782918 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f8743aa-53f3-40d0-8af1-3daaae9404c4-serving-cert\") pod \"console-operator-58897d9998-vl6t7\" (UID: \"6f8743aa-53f3-40d0-8af1-3daaae9404c4\") " pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.783076 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/46274096-898e-4f5e-9765-7f4058e4e5af-etcd-service-ca\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.783251 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-trusted-ca-bundle\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.783430 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/2b0c1536-0797-49f3-8f0f-de2bb4760a6b-auth-proxy-config\") pod \"machine-approver-56656f9798-lfvph\" (UID: \"2b0c1536-0797-49f3-8f0f-de2bb4760a6b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.783677 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-gs4k5"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.783709 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jxwjd"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.783725 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/727e26ec-b579-4b62-846e-c626fbf44f20-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-j2cmz\" (UID: \"727e26ec-b579-4b62-846e-c626fbf44f20\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.784336 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/8696312f-d81d-442b-b80c-6938db27e66b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-dqndv\" (UID: \"8696312f-d81d-442b-b80c-6938db27e66b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.784546 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-etcd-serving-ca\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.784581 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jxwjd"] Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.784654 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.784836 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-etcd-client\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.784888 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f8743aa-53f3-40d0-8af1-3daaae9404c4-trusted-ca\") pod \"console-operator-58897d9998-vl6t7\" (UID: \"6f8743aa-53f3-40d0-8af1-3daaae9404c4\") " pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.785293 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-image-import-ca\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.785443 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d94b4cb1-bb7b-41c0-9670-654ba1336909-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jc5g5\" (UID: \"d94b4cb1-bb7b-41c0-9670-654ba1336909\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.786257 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/51fb452a-e943-4222-a52b-dbdc0f378760-stats-auth\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.786369 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/2b0c1536-0797-49f3-8f0f-de2bb4760a6b-machine-approver-tls\") pod \"machine-approver-56656f9798-lfvph\" (UID: \"2b0c1536-0797-49f3-8f0f-de2bb4760a6b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.786432 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/51fb452a-e943-4222-a52b-dbdc0f378760-metrics-certs\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.787685 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/26599783-6b54-49f1-885a-3e87257c7063-profile-collector-cert\") pod \"catalog-operator-68c6474976-gcpj6\" (UID: \"26599783-6b54-49f1-885a-3e87257c7063\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.787883 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-encryption-config\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.787927 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/46274096-898e-4f5e-9765-7f4058e4e5af-etcd-client\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.788063 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-serving-cert\") pod \"route-controller-manager-6576b87f9c-mp9vx\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.788132 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/687946c8-cb4f-4db3-85ed-31606d7a3e39-proxy-tls\") pod \"machine-config-controller-84d6567774-jdqwb\" (UID: \"687946c8-cb4f-4db3-85ed-31606d7a3e39\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.788480 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/660474bf-d4be-49dc-b993-5cd3161cb575-console-oauth-config\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.788795 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/51fb452a-e943-4222-a52b-dbdc0f378760-default-certificate\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.788893 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a7269eba-82ff-4387-a35a-767850aa52d7-serving-cert\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.789033 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/26599783-6b54-49f1-885a-3e87257c7063-srv-cert\") pod \"catalog-operator-68c6474976-gcpj6\" (UID: \"26599783-6b54-49f1-885a-3e87257c7063\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.789334 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.789363 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46274096-898e-4f5e-9765-7f4058e4e5af-serving-cert\") pod \"etcd-operator-b45778765-wxvt5\" (UID: 
\"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.789529 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/a40a32f4-3f8b-4397-a193-536f81131064-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-5nt7b\" (UID: \"a40a32f4-3f8b-4397-a193-536f81131064\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5nt7b" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.790358 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/660474bf-d4be-49dc-b993-5cd3161cb575-console-serving-cert\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.790824 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-serving-cert\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.791027 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-2v9xt\" (UID: \"fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.791485 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d94b4cb1-bb7b-41c0-9670-654ba1336909-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-jc5g5\" (UID: \"d94b4cb1-bb7b-41c0-9670-654ba1336909\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.792366 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5c5bbff-cf34-40eb-b319-3b863d1e7776-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-x84jm\" (UID: \"b5c5bbff-cf34-40eb-b319-3b863d1e7776\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.794145 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7269eba-82ff-4387-a35a-767850aa52d7-config\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.809663 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.813332 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9e08710b-39f2-4458-82c7-7c4cd8978787-images\") pod \"machine-config-operator-74547568cd-xpg4s\" (UID: \"9e08710b-39f2-4458-82c7-7c4cd8978787\") 
" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.829696 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.849615 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.864075 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9e08710b-39f2-4458-82c7-7c4cd8978787-proxy-tls\") pod \"machine-config-operator-74547568cd-xpg4s\" (UID: \"9e08710b-39f2-4458-82c7-7c4cd8978787\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.890145 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.909625 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.929287 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.948965 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.969008 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 10 10:47:48 crc kubenswrapper[4682]: I1210 10:47:48.989728 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.009309 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.029194 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.050282 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.069400 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.089206 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.109765 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.112052 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"4690e6f02078879419854891c73a99a1b20b9cba6bc34a4623ac5986c8fc51e3"} Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.113204 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"6025f9e390886ad9d879c8c923b734652f3e2d7c1bf2e8faedff4cf636aeb221"} Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.113399 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.129645 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.149160 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.169293 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.189745 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.216505 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.230064 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.249922 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.289332 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.309761 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.329004 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.349094 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.369210 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.390010 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.409211 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.430723 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.449006 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.469581 4682 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-storage-version-migrator-operator"/"config" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.488935 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.510530 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.530828 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.550062 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.569812 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.590149 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.610877 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.629660 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.649911 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.668231 4682 request.go:700] Waited for 1.005041168s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/secrets?fieldSelector=metadata.name%3Dopenshift-controller-manager-sa-dockercfg-msq4c&limit=500&resourceVersion=0 Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.669909 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.690158 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.710182 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.737083 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.749878 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.770365 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.788813 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 10 10:47:49 crc 
kubenswrapper[4682]: I1210 10:47:49.810370 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.829494 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.848827 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.868663 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.888905 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.909297 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.929598 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.949838 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.970787 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 10 10:47:49 crc kubenswrapper[4682]: I1210 10:47:49.990232 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.010168 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.029998 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.050206 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.070078 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.090557 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.111136 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.129993 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.149841 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.170004 4682 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.190366 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.222104 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.229629 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.249829 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.269978 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.290151 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.310169 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.330500 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.350739 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.369523 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.390074 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.410728 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.428752 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.448425 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.469575 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.489066 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.536787 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5rqd\" (UniqueName: \"kubernetes.io/projected/b5c5bbff-cf34-40eb-b319-3b863d1e7776-kube-api-access-j5rqd\") pod \"openshift-apiserver-operator-796bbdcf4f-x84jm\" (UID: \"b5c5bbff-cf34-40eb-b319-3b863d1e7776\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm" Dec 10 10:47:50 crc 
kubenswrapper[4682]: I1210 10:47:50.546937 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtcw8\" (UniqueName: \"kubernetes.io/projected/e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9-kube-api-access-jtcw8\") pod \"apiserver-76f77b778f-ccs9l\" (UID: \"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9\") " pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.573576 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnlp9\" (UniqueName: \"kubernetes.io/projected/26599783-6b54-49f1-885a-3e87257c7063-kube-api-access-cnlp9\") pod \"catalog-operator-68c6474976-gcpj6\" (UID: \"26599783-6b54-49f1-885a-3e87257c7063\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.588968 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gt5w\" (UniqueName: \"kubernetes.io/projected/fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e-kube-api-access-7gt5w\") pod \"cluster-samples-operator-665b6dd947-2v9xt\" (UID: \"fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.609013 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hg4v5\" (UniqueName: \"kubernetes.io/projected/9e08710b-39f2-4458-82c7-7c4cd8978787-kube-api-access-hg4v5\") pod \"machine-config-operator-74547568cd-xpg4s\" (UID: \"9e08710b-39f2-4458-82c7-7c4cd8978787\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.628109 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7frv\" (UniqueName: \"kubernetes.io/projected/a7269eba-82ff-4387-a35a-767850aa52d7-kube-api-access-g7frv\") pod \"authentication-operator-69f744f599-ghhnn\" (UID: \"a7269eba-82ff-4387-a35a-767850aa52d7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.629455 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.643244 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.647355 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82z7l\" (UniqueName: \"kubernetes.io/projected/51fb452a-e943-4222-a52b-dbdc0f378760-kube-api-access-82z7l\") pod \"router-default-5444994796-jfqfn\" (UID: \"51fb452a-e943-4222-a52b-dbdc0f378760\") " pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.653015 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.680484 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/727e26ec-b579-4b62-846e-c626fbf44f20-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-j2cmz\" (UID: \"727e26ec-b579-4b62-846e-c626fbf44f20\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.687794 4682 request.go:700] Waited for 1.906121941s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-cluster-machine-approver/serviceaccounts/machine-approver-sa/token Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.692143 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bv6lc\" (UniqueName: \"kubernetes.io/projected/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-kube-api-access-bv6lc\") pod \"route-controller-manager-6576b87f9c-mp9vx\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.710922 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4678\" (UniqueName: \"kubernetes.io/projected/2b0c1536-0797-49f3-8f0f-de2bb4760a6b-kube-api-access-q4678\") pod \"machine-approver-56656f9798-lfvph\" (UID: \"2b0c1536-0797-49f3-8f0f-de2bb4760a6b\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.731003 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fh27v\" (UniqueName: \"kubernetes.io/projected/8696312f-d81d-442b-b80c-6938db27e66b-kube-api-access-fh27v\") pod \"machine-api-operator-5694c8668f-dqndv\" (UID: \"8696312f-d81d-442b-b80c-6938db27e66b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.742392 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.758997 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kk86b\" (UniqueName: \"kubernetes.io/projected/6f8743aa-53f3-40d0-8af1-3daaae9404c4-kube-api-access-kk86b\") pod \"console-operator-58897d9998-vl6t7\" (UID: \"6f8743aa-53f3-40d0-8af1-3daaae9404c4\") " pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.759223 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.772896 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp4qm\" (UniqueName: \"kubernetes.io/projected/660474bf-d4be-49dc-b993-5cd3161cb575-kube-api-access-tp4qm\") pod \"console-f9d7485db-ftd94\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.788259 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rx2t\" (UniqueName: \"kubernetes.io/projected/d94b4cb1-bb7b-41c0-9670-654ba1336909-kube-api-access-5rx2t\") pod \"openshift-controller-manager-operator-756b6f6bc6-jc5g5\" (UID: \"d94b4cb1-bb7b-41c0-9670-654ba1336909\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.794112 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.813747 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmxr7\" (UniqueName: \"kubernetes.io/projected/a40a32f4-3f8b-4397-a193-536f81131064-kube-api-access-fmxr7\") pod \"multus-admission-controller-857f4d67dd-5nt7b\" (UID: \"a40a32f4-3f8b-4397-a193-536f81131064\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5nt7b" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.816922 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.828271 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnflj\" (UniqueName: \"kubernetes.io/projected/687946c8-cb4f-4db3-85ed-31606d7a3e39-kube-api-access-mnflj\") pod \"machine-config-controller-84d6567774-jdqwb\" (UID: \"687946c8-cb4f-4db3-85ed-31606d7a3e39\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.834794 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.845133 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmjtk\" (UniqueName: \"kubernetes.io/projected/46274096-898e-4f5e-9765-7f4058e4e5af-kube-api-access-gmjtk\") pod \"etcd-operator-b45778765-wxvt5\" (UID: \"46274096-898e-4f5e-9765-7f4058e4e5af\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.851747 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.855527 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.863316 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.869549 4682 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.870113 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.879552 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.891855 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.916671 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-bound-sa-token\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.916725 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpvq9\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-kube-api-access-kpvq9\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.916752 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.916784 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.916812 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.916850 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0a575832-6a51-4f80-9c12-346c7d4764f2-trusted-ca\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.916876 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0a575832-6a51-4f80-9c12-346c7d4764f2-ca-trust-extracted\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.916903 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7076dac7-bf2d-4191-81f5-73b260ff0a75-audit-dir\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.916937 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.916966 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.916995 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.917014 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.917053 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.917074 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-session\") pod 
\"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.917095 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-registry-tls\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.917122 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.917146 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0a575832-6a51-4f80-9c12-346c7d4764f2-installation-pull-secrets\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.917172 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0a575832-6a51-4f80-9c12-346c7d4764f2-registry-certificates\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.917210 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.917226 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.917245 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-audit-policies\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.917262 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rttvq\" (UniqueName: 
\"kubernetes.io/projected/7076dac7-bf2d-4191-81f5-73b260ff0a75-kube-api-access-rttvq\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:50 crc kubenswrapper[4682]: E1210 10:47:50.917768 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.417752757 +0000 UTC m=+151.737963707 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.920033 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s"] Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.927969 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.933922 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-5nt7b" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.961272 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" Dec 10 10:47:50 crc kubenswrapper[4682]: I1210 10:47:50.972078 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.018013 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:51 crc kubenswrapper[4682]: E1210 10:47:51.018095 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.51807993 +0000 UTC m=+151.838290680 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019336 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019372 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a9170d46-a469-4124-9c5a-57ce54d5dfec-metrics-tls\") pod \"dns-default-gs4k5\" (UID: \"a9170d46-a469-4124-9c5a-57ce54d5dfec\") " pod="openshift-dns/dns-default-gs4k5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019390 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8fe39f56-5b24-4b88-9cd6-02458b68986d-config-volume\") pod \"collect-profiles-29422725-7kvsn\" (UID: \"8fe39f56-5b24-4b88-9cd6-02458b68986d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019416 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019436 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cb06630-9676-44bf-9dff-1b99d98f7991-config\") pod \"service-ca-operator-777779d784-4rz5b\" (UID: \"1cb06630-9676-44bf-9dff-1b99d98f7991\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019458 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aa13bce-7730-4b3e-aab0-41bfb905edf5-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jtgjc\" (UID: \"1aa13bce-7730-4b3e-aab0-41bfb905edf5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019503 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019538 
4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8fe39f56-5b24-4b88-9cd6-02458b68986d-secret-volume\") pod \"collect-profiles-29422725-7kvsn\" (UID: \"8fe39f56-5b24-4b88-9cd6-02458b68986d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019554 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dpff\" (UniqueName: \"kubernetes.io/projected/df9b9c19-3321-4bcd-a43a-0f2eb32ea147-kube-api-access-8dpff\") pod \"migrator-59844c95c7-68w64\" (UID: \"df9b9c19-3321-4bcd-a43a-0f2eb32ea147\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-68w64" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019583 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2727aac6-7187-49fa-afc4-f339aef7d96a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-xm626\" (UID: \"2727aac6-7187-49fa-afc4-f339aef7d96a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019633 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcxcq\" (UniqueName: \"kubernetes.io/projected/a9170d46-a469-4124-9c5a-57ce54d5dfec-kube-api-access-lcxcq\") pod \"dns-default-gs4k5\" (UID: \"a9170d46-a469-4124-9c5a-57ce54d5dfec\") " pod="openshift-dns/dns-default-gs4k5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019660 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0a575832-6a51-4f80-9c12-346c7d4764f2-registry-certificates\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019676 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c39ff528-9225-4c16-b25d-1b34929dadcb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-t9w8x\" (UID: \"c39ff528-9225-4c16-b25d-1b34929dadcb\") " pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019692 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b066d982-7235-4c40-b72c-987b213031b2-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-7hwpr\" (UID: \"b066d982-7235-4c40-b72c-987b213031b2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019711 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqs8k\" (UniqueName: \"kubernetes.io/projected/cfbd68ba-8aec-439c-9549-9347c5e80d21-kube-api-access-lqs8k\") pod \"openshift-config-operator-7777fb866f-dbszw\" (UID: \"cfbd68ba-8aec-439c-9549-9347c5e80d21\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019728 4682 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019754 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019770 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/499d5dfb-fb52-403b-9249-259a383d7562-config\") pod \"kube-apiserver-operator-766d6c64bb-ctj42\" (UID: \"499d5dfb-fb52-403b-9249-259a383d7562\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019786 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/4668e8a7-5460-4e80-bc1a-7895133d6708-node-bootstrap-token\") pod \"machine-config-server-ghbxg\" (UID: \"4668e8a7-5460-4e80-bc1a-7895133d6708\") " pod="openshift-machine-config-operator/machine-config-server-ghbxg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019802 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1aa13bce-7730-4b3e-aab0-41bfb905edf5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jtgjc\" (UID: \"1aa13bce-7730-4b3e-aab0-41bfb905edf5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019817 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-serving-cert\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019843 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-audit-policies\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019858 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rttvq\" (UniqueName: \"kubernetes.io/projected/7076dac7-bf2d-4191-81f5-73b260ff0a75-kube-api-access-rttvq\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019874 4682 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jn6mj\" (UniqueName: \"kubernetes.io/projected/c39ff528-9225-4c16-b25d-1b34929dadcb-kube-api-access-jn6mj\") pod \"marketplace-operator-79b997595-t9w8x\" (UID: \"c39ff528-9225-4c16-b25d-1b34929dadcb\") " pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019899 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-754bh\" (UniqueName: \"kubernetes.io/projected/2727aac6-7187-49fa-afc4-f339aef7d96a-kube-api-access-754bh\") pod \"olm-operator-6b444d44fb-xm626\" (UID: \"2727aac6-7187-49fa-afc4-f339aef7d96a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019915 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1cb06630-9676-44bf-9dff-1b99d98f7991-serving-cert\") pod \"service-ca-operator-777779d784-4rz5b\" (UID: \"1cb06630-9676-44bf-9dff-1b99d98f7991\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019929 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/4668e8a7-5460-4e80-bc1a-7895133d6708-certs\") pod \"machine-config-server-ghbxg\" (UID: \"4668e8a7-5460-4e80-bc1a-7895133d6708\") " pod="openshift-machine-config-operator/machine-config-server-ghbxg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019950 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ppd6\" (UniqueName: \"kubernetes.io/projected/b066d982-7235-4c40-b72c-987b213031b2-kube-api-access-9ppd6\") pod \"cluster-image-registry-operator-dc59b4c8b-7hwpr\" (UID: \"b066d982-7235-4c40-b72c-987b213031b2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.019967 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-bound-sa-token\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020013 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-config\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020031 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020055 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/05316a74-fdb1-46dd-a91c-eea173459834-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9vpx\" (UID: \"05316a74-fdb1-46dd-a91c-eea173459834\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020090 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfbd68ba-8aec-439c-9549-9347c5e80d21-serving-cert\") pod \"openshift-config-operator-7777fb866f-dbszw\" (UID: \"cfbd68ba-8aec-439c-9549-9347c5e80d21\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020106 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a9170d46-a469-4124-9c5a-57ce54d5dfec-config-volume\") pod \"dns-default-gs4k5\" (UID: \"a9170d46-a469-4124-9c5a-57ce54d5dfec\") " pod="openshift-dns/dns-default-gs4k5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020122 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020153 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0a575832-6a51-4f80-9c12-346c7d4764f2-ca-trust-extracted\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020169 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1aa13bce-7730-4b3e-aab0-41bfb905edf5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jtgjc\" (UID: \"1aa13bce-7730-4b3e-aab0-41bfb905edf5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020193 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7076dac7-bf2d-4191-81f5-73b260ff0a75-audit-dir\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020242 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c39ff528-9225-4c16-b25d-1b34929dadcb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-t9w8x\" (UID: \"c39ff528-9225-4c16-b25d-1b34929dadcb\") " pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020256 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" 
(UniqueName: \"kubernetes.io/secret/48e39be3-4c21-47aa-86cd-ec2830784ad6-signing-key\") pod \"service-ca-9c57cc56f-vv5lr\" (UID: \"48e39be3-4c21-47aa-86cd-ec2830784ad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-vv5lr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020280 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020297 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2727aac6-7187-49fa-afc4-f339aef7d96a-srv-cert\") pod \"olm-operator-6b444d44fb-xm626\" (UID: \"2727aac6-7187-49fa-afc4-f339aef7d96a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020313 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020346 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qb88b\" (UniqueName: \"kubernetes.io/projected/e21d3967-e194-40a2-b3cd-f482e84c70e3-kube-api-access-qb88b\") pod \"ingress-canary-ncfz6\" (UID: \"e21d3967-e194-40a2-b3cd-f482e84c70e3\") " pod="openshift-ingress-canary/ingress-canary-ncfz6" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020363 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-registry-tls\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020378 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020405 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0a575832-6a51-4f80-9c12-346c7d4764f2-installation-pull-secrets\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020438 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/48e39be3-4c21-47aa-86cd-ec2830784ad6-signing-cabundle\") pod \"service-ca-9c57cc56f-vv5lr\" (UID: 
\"48e39be3-4c21-47aa-86cd-ec2830784ad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-vv5lr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020496 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szmc5\" (UniqueName: \"kubernetes.io/projected/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-kube-api-access-szmc5\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020512 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sr5zw\" (UniqueName: \"kubernetes.io/projected/1cb06630-9676-44bf-9dff-1b99d98f7991-kube-api-access-sr5zw\") pod \"service-ca-operator-777779d784-4rz5b\" (UID: \"1cb06630-9676-44bf-9dff-1b99d98f7991\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020538 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkkw5\" (UniqueName: \"kubernetes.io/projected/8fe39f56-5b24-4b88-9cd6-02458b68986d-kube-api-access-qkkw5\") pod \"collect-profiles-29422725-7kvsn\" (UID: \"8fe39f56-5b24-4b88-9cd6-02458b68986d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020553 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b066d982-7235-4c40-b72c-987b213031b2-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-7hwpr\" (UID: \"b066d982-7235-4c40-b72c-987b213031b2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020569 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/499d5dfb-fb52-403b-9249-259a383d7562-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-ctj42\" (UID: \"499d5dfb-fb52-403b-9249-259a383d7562\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020583 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-client-ca\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020605 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/499d5dfb-fb52-403b-9249-259a383d7562-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-ctj42\" (UID: \"499d5dfb-fb52-403b-9249-259a383d7562\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020623 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020650 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpvq9\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-kube-api-access-kpvq9\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020685 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020704 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/cfbd68ba-8aec-439c-9549-9347c5e80d21-available-featuregates\") pod \"openshift-config-operator-7777fb866f-dbszw\" (UID: \"cfbd68ba-8aec-439c-9549-9347c5e80d21\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020721 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b066d982-7235-4c40-b72c-987b213031b2-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-7hwpr\" (UID: \"b066d982-7235-4c40-b72c-987b213031b2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020737 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6scz7\" (UniqueName: \"kubernetes.io/projected/d6434666-a341-4560-a0ff-92d26a79c668-kube-api-access-6scz7\") pod \"downloads-7954f5f757-7xtlk\" (UID: \"d6434666-a341-4560-a0ff-92d26a79c668\") " pod="openshift-console/downloads-7954f5f757-7xtlk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020752 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nnhv\" (UniqueName: \"kubernetes.io/projected/4668e8a7-5460-4e80-bc1a-7895133d6708-kube-api-access-5nnhv\") pod \"machine-config-server-ghbxg\" (UID: \"4668e8a7-5460-4e80-bc1a-7895133d6708\") " pod="openshift-machine-config-operator/machine-config-server-ghbxg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020768 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e21d3967-e194-40a2-b3cd-f482e84c70e3-cert\") pod \"ingress-canary-ncfz6\" (UID: \"e21d3967-e194-40a2-b3cd-f482e84c70e3\") " pod="openshift-ingress-canary/ingress-canary-ncfz6" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020795 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0a575832-6a51-4f80-9c12-346c7d4764f2-trusted-ca\") pod 
\"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020828 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkzz7\" (UniqueName: \"kubernetes.io/projected/48e39be3-4c21-47aa-86cd-ec2830784ad6-kube-api-access-lkzz7\") pod \"service-ca-9c57cc56f-vv5lr\" (UID: \"48e39be3-4c21-47aa-86cd-ec2830784ad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-vv5lr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.020856 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.022195 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.024083 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0a575832-6a51-4f80-9c12-346c7d4764f2-ca-trust-extracted\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.024309 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7076dac7-bf2d-4191-81f5-73b260ff0a75-audit-dir\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.024601 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-audit-policies\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.026706 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.027325 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-registry-tls\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: 
I1210 10:47:51.027351 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.029146 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0a575832-6a51-4f80-9c12-346c7d4764f2-registry-certificates\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.029729 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.030056 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.030416 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0a575832-6a51-4f80-9c12-346c7d4764f2-installation-pull-secrets\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.030799 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.031496 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.032423 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: E1210 10:47:51.032512 4682 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.53249623 +0000 UTC m=+151.852706980 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.032596 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0a575832-6a51-4f80-9c12-346c7d4764f2-trusted-ca\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.034731 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.034870 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.038110 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.067211 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-bound-sa-token\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.072299 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-ghhnn"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.088300 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ccs9l"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.101568 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpvq9\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-kube-api-access-kpvq9\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: 
\"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.115762 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rttvq\" (UniqueName: \"kubernetes.io/projected/7076dac7-bf2d-4191-81f5-73b260ff0a75-kube-api-access-rttvq\") pod \"oauth-openshift-558db77b4-4zh9p\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121165 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:51 crc kubenswrapper[4682]: E1210 10:47:51.121285 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.621245797 +0000 UTC m=+151.941456547 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121384 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qb88b\" (UniqueName: \"kubernetes.io/projected/e21d3967-e194-40a2-b3cd-f482e84c70e3-kube-api-access-qb88b\") pod \"ingress-canary-ncfz6\" (UID: \"e21d3967-e194-40a2-b3cd-f482e84c70e3\") " pod="openshift-ingress-canary/ingress-canary-ncfz6" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121413 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6n69w\" (UniqueName: \"kubernetes.io/projected/de21f9aa-1450-423f-93f7-75b6ca444f9f-kube-api-access-6n69w\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121434 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/faca7bed-c836-4f78-aaa9-29ec2b6db91b-trusted-ca\") pod \"ingress-operator-5b745b69d9-n9zvk\" (UID: \"faca7bed-c836-4f78-aaa9-29ec2b6db91b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121464 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsgpl\" (UniqueName: \"kubernetes.io/projected/fb0db14b-539a-489f-baea-92c499d99906-kube-api-access-qsgpl\") pod \"control-plane-machine-set-operator-78cbb6b69f-9gwb2\" (UID: \"fb0db14b-539a-489f-baea-92c499d99906\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9gwb2" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 
10:47:51.121499 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/48e39be3-4c21-47aa-86cd-ec2830784ad6-signing-cabundle\") pod \"service-ca-9c57cc56f-vv5lr\" (UID: \"48e39be3-4c21-47aa-86cd-ec2830784ad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-vv5lr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121517 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pfrk\" (UniqueName: \"kubernetes.io/projected/cf739648-f0c6-4f34-be4b-57f84579a9cb-kube-api-access-9pfrk\") pod \"package-server-manager-789f6589d5-bc6vq\" (UID: \"cf739648-f0c6-4f34-be4b-57f84579a9cb\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121541 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/faca7bed-c836-4f78-aaa9-29ec2b6db91b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-n9zvk\" (UID: \"faca7bed-c836-4f78-aaa9-29ec2b6db91b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121556 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/cf739648-f0c6-4f34-be4b-57f84579a9cb-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-bc6vq\" (UID: \"cf739648-f0c6-4f34-be4b-57f84579a9cb\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121571 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/de21f9aa-1450-423f-93f7-75b6ca444f9f-encryption-config\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121595 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-plugins-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121610 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szmc5\" (UniqueName: \"kubernetes.io/projected/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-kube-api-access-szmc5\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121625 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sr5zw\" (UniqueName: \"kubernetes.io/projected/1cb06630-9676-44bf-9dff-1b99d98f7991-kube-api-access-sr5zw\") pod \"service-ca-operator-777779d784-4rz5b\" (UID: \"1cb06630-9676-44bf-9dff-1b99d98f7991\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121642 4682 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-registration-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121659 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkkw5\" (UniqueName: \"kubernetes.io/projected/8fe39f56-5b24-4b88-9cd6-02458b68986d-kube-api-access-qkkw5\") pod \"collect-profiles-29422725-7kvsn\" (UID: \"8fe39f56-5b24-4b88-9cd6-02458b68986d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121675 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b066d982-7235-4c40-b72c-987b213031b2-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-7hwpr\" (UID: \"b066d982-7235-4c40-b72c-987b213031b2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121698 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/499d5dfb-fb52-403b-9249-259a383d7562-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-ctj42\" (UID: \"499d5dfb-fb52-403b-9249-259a383d7562\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121723 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/499d5dfb-fb52-403b-9249-259a383d7562-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-ctj42\" (UID: \"499d5dfb-fb52-403b-9249-259a383d7562\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121739 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-client-ca\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121759 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de21f9aa-1450-423f-93f7-75b6ca444f9f-serving-cert\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121774 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/de21f9aa-1450-423f-93f7-75b6ca444f9f-audit-dir\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121787 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: 
\"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-mountpoint-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121805 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/cfbd68ba-8aec-439c-9549-9347c5e80d21-available-featuregates\") pod \"openshift-config-operator-7777fb866f-dbszw\" (UID: \"cfbd68ba-8aec-439c-9549-9347c5e80d21\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121820 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6scz7\" (UniqueName: \"kubernetes.io/projected/d6434666-a341-4560-a0ff-92d26a79c668-kube-api-access-6scz7\") pod \"downloads-7954f5f757-7xtlk\" (UID: \"d6434666-a341-4560-a0ff-92d26a79c668\") " pod="openshift-console/downloads-7954f5f757-7xtlk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121836 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nnhv\" (UniqueName: \"kubernetes.io/projected/4668e8a7-5460-4e80-bc1a-7895133d6708-kube-api-access-5nnhv\") pod \"machine-config-server-ghbxg\" (UID: \"4668e8a7-5460-4e80-bc1a-7895133d6708\") " pod="openshift-machine-config-operator/machine-config-server-ghbxg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121851 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b066d982-7235-4c40-b72c-987b213031b2-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-7hwpr\" (UID: \"b066d982-7235-4c40-b72c-987b213031b2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121866 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dd40681d-3ca3-4132-9cd0-c2a7982bdd45-apiservice-cert\") pod \"packageserver-d55dfcdfc-zwbl9\" (UID: \"dd40681d-3ca3-4132-9cd0-c2a7982bdd45\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121884 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e21d3967-e194-40a2-b3cd-f482e84c70e3-cert\") pod \"ingress-canary-ncfz6\" (UID: \"e21d3967-e194-40a2-b3cd-f482e84c70e3\") " pod="openshift-ingress-canary/ingress-canary-ncfz6" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121919 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5bfx\" (UniqueName: \"kubernetes.io/projected/677d94d3-efad-4264-88fb-cbbacbb2e267-kube-api-access-x5bfx\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121937 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkzz7\" (UniqueName: \"kubernetes.io/projected/48e39be3-4c21-47aa-86cd-ec2830784ad6-kube-api-access-lkzz7\") pod \"service-ca-9c57cc56f-vv5lr\" (UID: \"48e39be3-4c21-47aa-86cd-ec2830784ad6\") " 
pod="openshift-service-ca/service-ca-9c57cc56f-vv5lr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121953 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/de21f9aa-1450-423f-93f7-75b6ca444f9f-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121976 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/de21f9aa-1450-423f-93f7-75b6ca444f9f-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.121992 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrn5c\" (UniqueName: \"kubernetes.io/projected/b334d688-3122-4479-bfcb-37e70a059129-kube-api-access-nrn5c\") pod \"dns-operator-744455d44c-pmblg\" (UID: \"b334d688-3122-4479-bfcb-37e70a059129\") " pod="openshift-dns-operator/dns-operator-744455d44c-pmblg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122013 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a9170d46-a469-4124-9c5a-57ce54d5dfec-metrics-tls\") pod \"dns-default-gs4k5\" (UID: \"a9170d46-a469-4124-9c5a-57ce54d5dfec\") " pod="openshift-dns/dns-default-gs4k5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122027 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8fe39f56-5b24-4b88-9cd6-02458b68986d-config-volume\") pod \"collect-profiles-29422725-7kvsn\" (UID: \"8fe39f56-5b24-4b88-9cd6-02458b68986d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122044 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cb06630-9676-44bf-9dff-1b99d98f7991-config\") pod \"service-ca-operator-777779d784-4rz5b\" (UID: \"1cb06630-9676-44bf-9dff-1b99d98f7991\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122058 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aa13bce-7730-4b3e-aab0-41bfb905edf5-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jtgjc\" (UID: \"1aa13bce-7730-4b3e-aab0-41bfb905edf5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122073 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/de21f9aa-1450-423f-93f7-75b6ca444f9f-audit-policies\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122093 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122109 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/faca7bed-c836-4f78-aaa9-29ec2b6db91b-metrics-tls\") pod \"ingress-operator-5b745b69d9-n9zvk\" (UID: \"faca7bed-c836-4f78-aaa9-29ec2b6db91b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122125 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8fe39f56-5b24-4b88-9cd6-02458b68986d-secret-volume\") pod \"collect-profiles-29422725-7kvsn\" (UID: \"8fe39f56-5b24-4b88-9cd6-02458b68986d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122153 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dpff\" (UniqueName: \"kubernetes.io/projected/df9b9c19-3321-4bcd-a43a-0f2eb32ea147-kube-api-access-8dpff\") pod \"migrator-59844c95c7-68w64\" (UID: \"df9b9c19-3321-4bcd-a43a-0f2eb32ea147\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-68w64" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122169 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65ppq\" (UniqueName: \"kubernetes.io/projected/faca7bed-c836-4f78-aaa9-29ec2b6db91b-kube-api-access-65ppq\") pod \"ingress-operator-5b745b69d9-n9zvk\" (UID: \"faca7bed-c836-4f78-aaa9-29ec2b6db91b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122196 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2727aac6-7187-49fa-afc4-f339aef7d96a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-xm626\" (UID: \"2727aac6-7187-49fa-afc4-f339aef7d96a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122210 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-csi-data-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122243 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbnct\" (UniqueName: \"kubernetes.io/projected/dd40681d-3ca3-4132-9cd0-c2a7982bdd45-kube-api-access-rbnct\") pod \"packageserver-d55dfcdfc-zwbl9\" (UID: \"dd40681d-3ca3-4132-9cd0-c2a7982bdd45\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122258 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/b334d688-3122-4479-bfcb-37e70a059129-metrics-tls\") pod \"dns-operator-744455d44c-pmblg\" (UID: \"b334d688-3122-4479-bfcb-37e70a059129\") " pod="openshift-dns-operator/dns-operator-744455d44c-pmblg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122274 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcxcq\" (UniqueName: \"kubernetes.io/projected/a9170d46-a469-4124-9c5a-57ce54d5dfec-kube-api-access-lcxcq\") pod \"dns-default-gs4k5\" (UID: \"a9170d46-a469-4124-9c5a-57ce54d5dfec\") " pod="openshift-dns/dns-default-gs4k5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122298 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c39ff528-9225-4c16-b25d-1b34929dadcb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-t9w8x\" (UID: \"c39ff528-9225-4c16-b25d-1b34929dadcb\") " pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122315 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b066d982-7235-4c40-b72c-987b213031b2-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-7hwpr\" (UID: \"b066d982-7235-4c40-b72c-987b213031b2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122350 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqs8k\" (UniqueName: \"kubernetes.io/projected/cfbd68ba-8aec-439c-9549-9347c5e80d21-kube-api-access-lqs8k\") pod \"openshift-config-operator-7777fb866f-dbszw\" (UID: \"cfbd68ba-8aec-439c-9549-9347c5e80d21\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122377 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/de21f9aa-1450-423f-93f7-75b6ca444f9f-etcd-client\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122393 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/499d5dfb-fb52-403b-9249-259a383d7562-config\") pod \"kube-apiserver-operator-766d6c64bb-ctj42\" (UID: \"499d5dfb-fb52-403b-9249-259a383d7562\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122407 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/4668e8a7-5460-4e80-bc1a-7895133d6708-node-bootstrap-token\") pod \"machine-config-server-ghbxg\" (UID: \"4668e8a7-5460-4e80-bc1a-7895133d6708\") " pod="openshift-machine-config-operator/machine-config-server-ghbxg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122423 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1aa13bce-7730-4b3e-aab0-41bfb905edf5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jtgjc\" (UID: \"1aa13bce-7730-4b3e-aab0-41bfb905edf5\") " 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122437 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-serving-cert\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122454 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-754bh\" (UniqueName: \"kubernetes.io/projected/2727aac6-7187-49fa-afc4-f339aef7d96a-kube-api-access-754bh\") pod \"olm-operator-6b444d44fb-xm626\" (UID: \"2727aac6-7187-49fa-afc4-f339aef7d96a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122706 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jn6mj\" (UniqueName: \"kubernetes.io/projected/c39ff528-9225-4c16-b25d-1b34929dadcb-kube-api-access-jn6mj\") pod \"marketplace-operator-79b997595-t9w8x\" (UID: \"c39ff528-9225-4c16-b25d-1b34929dadcb\") " pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122731 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1cb06630-9676-44bf-9dff-1b99d98f7991-serving-cert\") pod \"service-ca-operator-777779d784-4rz5b\" (UID: \"1cb06630-9676-44bf-9dff-1b99d98f7991\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122746 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/4668e8a7-5460-4e80-bc1a-7895133d6708-certs\") pod \"machine-config-server-ghbxg\" (UID: \"4668e8a7-5460-4e80-bc1a-7895133d6708\") " pod="openshift-machine-config-operator/machine-config-server-ghbxg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122769 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ppd6\" (UniqueName: \"kubernetes.io/projected/b066d982-7235-4c40-b72c-987b213031b2-kube-api-access-9ppd6\") pod \"cluster-image-registry-operator-dc59b4c8b-7hwpr\" (UID: \"b066d982-7235-4c40-b72c-987b213031b2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122799 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/fb0db14b-539a-489f-baea-92c499d99906-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-9gwb2\" (UID: \"fb0db14b-539a-489f-baea-92c499d99906\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9gwb2" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122816 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-config\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 
10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122833 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/05316a74-fdb1-46dd-a91c-eea173459834-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9vpx\" (UID: \"05316a74-fdb1-46dd-a91c-eea173459834\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122853 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfbd68ba-8aec-439c-9549-9347c5e80d21-serving-cert\") pod \"openshift-config-operator-7777fb866f-dbszw\" (UID: \"cfbd68ba-8aec-439c-9549-9347c5e80d21\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122867 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-socket-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122895 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6k5x\" (UniqueName: \"kubernetes.io/projected/05316a74-fdb1-46dd-a91c-eea173459834-kube-api-access-b6k5x\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9vpx\" (UID: \"05316a74-fdb1-46dd-a91c-eea173459834\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122921 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a9170d46-a469-4124-9c5a-57ce54d5dfec-config-volume\") pod \"dns-default-gs4k5\" (UID: \"a9170d46-a469-4124-9c5a-57ce54d5dfec\") " pod="openshift-dns/dns-default-gs4k5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122936 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dd40681d-3ca3-4132-9cd0-c2a7982bdd45-webhook-cert\") pod \"packageserver-d55dfcdfc-zwbl9\" (UID: \"dd40681d-3ca3-4132-9cd0-c2a7982bdd45\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122951 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122970 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/dd40681d-3ca3-4132-9cd0-c2a7982bdd45-tmpfs\") pod \"packageserver-d55dfcdfc-zwbl9\" (UID: \"dd40681d-3ca3-4132-9cd0-c2a7982bdd45\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.122987 4682 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1aa13bce-7730-4b3e-aab0-41bfb905edf5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jtgjc\" (UID: \"1aa13bce-7730-4b3e-aab0-41bfb905edf5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.123013 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c39ff528-9225-4c16-b25d-1b34929dadcb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-t9w8x\" (UID: \"c39ff528-9225-4c16-b25d-1b34929dadcb\") " pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.123029 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05316a74-fdb1-46dd-a91c-eea173459834-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9vpx\" (UID: \"05316a74-fdb1-46dd-a91c-eea173459834\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.123045 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/48e39be3-4c21-47aa-86cd-ec2830784ad6-signing-key\") pod \"service-ca-9c57cc56f-vv5lr\" (UID: \"48e39be3-4c21-47aa-86cd-ec2830784ad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-vv5lr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.123052 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/48e39be3-4c21-47aa-86cd-ec2830784ad6-signing-cabundle\") pod \"service-ca-9c57cc56f-vv5lr\" (UID: \"48e39be3-4c21-47aa-86cd-ec2830784ad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-vv5lr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.123061 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2727aac6-7187-49fa-afc4-f339aef7d96a-srv-cert\") pod \"olm-operator-6b444d44fb-xm626\" (UID: \"2727aac6-7187-49fa-afc4-f339aef7d96a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.123547 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" event={"ID":"a7269eba-82ff-4387-a35a-767850aa52d7","Type":"ContainerStarted","Data":"d43f06f8c858f24dd3a108270f6a48939a3c2bb2e9b09f48d66b60c76f661c36"} Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.126749 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c39ff528-9225-4c16-b25d-1b34929dadcb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-t9w8x\" (UID: \"c39ff528-9225-4c16-b25d-1b34929dadcb\") " pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.127090 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" 
event={"ID":"2b0c1536-0797-49f3-8f0f-de2bb4760a6b","Type":"ContainerStarted","Data":"55ec1bb635de7889d5aa6bd36c8329557f50fb9a45cb97f2870450eb4086082b"} Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.127233 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/cfbd68ba-8aec-439c-9549-9347c5e80d21-available-featuregates\") pod \"openshift-config-operator-7777fb866f-dbszw\" (UID: \"cfbd68ba-8aec-439c-9549-9347c5e80d21\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.128614 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.128890 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b066d982-7235-4c40-b72c-987b213031b2-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-7hwpr\" (UID: \"b066d982-7235-4c40-b72c-987b213031b2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.129267 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2727aac6-7187-49fa-afc4-f339aef7d96a-srv-cert\") pod \"olm-operator-6b444d44fb-xm626\" (UID: \"2727aac6-7187-49fa-afc4-f339aef7d96a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" Dec 10 10:47:51 crc kubenswrapper[4682]: E1210 10:47:51.129335 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.629317564 +0000 UTC m=+151.949528314 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.129374 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-jfqfn" event={"ID":"51fb452a-e943-4222-a52b-dbdc0f378760","Type":"ContainerStarted","Data":"bbe912c15d79ae45789ddac2470058e489afe8a2c8f12d885946c9937d280ee8"} Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.129947 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aa13bce-7730-4b3e-aab0-41bfb905edf5-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jtgjc\" (UID: \"1aa13bce-7730-4b3e-aab0-41bfb905edf5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.129995 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8fe39f56-5b24-4b88-9cd6-02458b68986d-config-volume\") pod \"collect-profiles-29422725-7kvsn\" (UID: \"8fe39f56-5b24-4b88-9cd6-02458b68986d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.131259 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cb06630-9676-44bf-9dff-1b99d98f7991-config\") pod \"service-ca-operator-777779d784-4rz5b\" (UID: \"1cb06630-9676-44bf-9dff-1b99d98f7991\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.131373 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" event={"ID":"9e08710b-39f2-4458-82c7-7c4cd8978787","Type":"ContainerStarted","Data":"d710e232a839fafe3a4b9260948edbf7d69cd87cbad6894d7d254db0fdc7b7f7"} Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.131920 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-config\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.132511 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/499d5dfb-fb52-403b-9249-259a383d7562-config\") pod \"kube-apiserver-operator-766d6c64bb-ctj42\" (UID: \"499d5dfb-fb52-403b-9249-259a383d7562\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.132968 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a9170d46-a469-4124-9c5a-57ce54d5dfec-config-volume\") pod \"dns-default-gs4k5\" (UID: \"a9170d46-a469-4124-9c5a-57ce54d5dfec\") " pod="openshift-dns/dns-default-gs4k5" Dec 
10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.132978 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.133066 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-client-ca\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.134207 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1cb06630-9676-44bf-9dff-1b99d98f7991-serving-cert\") pod \"service-ca-operator-777779d784-4rz5b\" (UID: \"1cb06630-9676-44bf-9dff-1b99d98f7991\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.141344 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/b066d982-7235-4c40-b72c-987b213031b2-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-7hwpr\" (UID: \"b066d982-7235-4c40-b72c-987b213031b2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.143761 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1aa13bce-7730-4b3e-aab0-41bfb905edf5-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jtgjc\" (UID: \"1aa13bce-7730-4b3e-aab0-41bfb905edf5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.143979 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2727aac6-7187-49fa-afc4-f339aef7d96a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-xm626\" (UID: \"2727aac6-7187-49fa-afc4-f339aef7d96a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.144365 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/499d5dfb-fb52-403b-9249-259a383d7562-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-ctj42\" (UID: \"499d5dfb-fb52-403b-9249-259a383d7562\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.144872 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/48e39be3-4c21-47aa-86cd-ec2830784ad6-signing-key\") pod \"service-ca-9c57cc56f-vv5lr\" (UID: \"48e39be3-4c21-47aa-86cd-ec2830784ad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-vv5lr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.144936 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/c39ff528-9225-4c16-b25d-1b34929dadcb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-t9w8x\" (UID: \"c39ff528-9225-4c16-b25d-1b34929dadcb\") " pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.149676 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/4668e8a7-5460-4e80-bc1a-7895133d6708-certs\") pod \"machine-config-server-ghbxg\" (UID: \"4668e8a7-5460-4e80-bc1a-7895133d6708\") " pod="openshift-machine-config-operator/machine-config-server-ghbxg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.151013 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e21d3967-e194-40a2-b3cd-f482e84c70e3-cert\") pod \"ingress-canary-ncfz6\" (UID: \"e21d3967-e194-40a2-b3cd-f482e84c70e3\") " pod="openshift-ingress-canary/ingress-canary-ncfz6" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.152911 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8fe39f56-5b24-4b88-9cd6-02458b68986d-secret-volume\") pod \"collect-profiles-29422725-7kvsn\" (UID: \"8fe39f56-5b24-4b88-9cd6-02458b68986d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.152997 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/05316a74-fdb1-46dd-a91c-eea173459834-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9vpx\" (UID: \"05316a74-fdb1-46dd-a91c-eea173459834\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.155728 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.156384 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfbd68ba-8aec-439c-9549-9347c5e80d21-serving-cert\") pod \"openshift-config-operator-7777fb866f-dbszw\" (UID: \"cfbd68ba-8aec-439c-9549-9347c5e80d21\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.156630 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/4668e8a7-5460-4e80-bc1a-7895133d6708-node-bootstrap-token\") pod \"machine-config-server-ghbxg\" (UID: \"4668e8a7-5460-4e80-bc1a-7895133d6708\") " pod="openshift-machine-config-operator/machine-config-server-ghbxg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.157019 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a9170d46-a469-4124-9c5a-57ce54d5dfec-metrics-tls\") pod \"dns-default-gs4k5\" (UID: \"a9170d46-a469-4124-9c5a-57ce54d5dfec\") " pod="openshift-dns/dns-default-gs4k5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.157069 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-serving-cert\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: 
\"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.168997 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qb88b\" (UniqueName: \"kubernetes.io/projected/e21d3967-e194-40a2-b3cd-f482e84c70e3-kube-api-access-qb88b\") pod \"ingress-canary-ncfz6\" (UID: \"e21d3967-e194-40a2-b3cd-f482e84c70e3\") " pod="openshift-ingress-canary/ingress-canary-ncfz6" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.187663 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szmc5\" (UniqueName: \"kubernetes.io/projected/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-kube-api-access-szmc5\") pod \"controller-manager-879f6c89f-gw5v5\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.206912 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sr5zw\" (UniqueName: \"kubernetes.io/projected/1cb06630-9676-44bf-9dff-1b99d98f7991-kube-api-access-sr5zw\") pod \"service-ca-operator-777779d784-4rz5b\" (UID: \"1cb06630-9676-44bf-9dff-1b99d98f7991\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225050 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225236 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6n69w\" (UniqueName: \"kubernetes.io/projected/de21f9aa-1450-423f-93f7-75b6ca444f9f-kube-api-access-6n69w\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225264 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/faca7bed-c836-4f78-aaa9-29ec2b6db91b-trusted-ca\") pod \"ingress-operator-5b745b69d9-n9zvk\" (UID: \"faca7bed-c836-4f78-aaa9-29ec2b6db91b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" Dec 10 10:47:51 crc kubenswrapper[4682]: E1210 10:47:51.225312 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.725289615 +0000 UTC m=+152.045500375 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225345 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsgpl\" (UniqueName: \"kubernetes.io/projected/fb0db14b-539a-489f-baea-92c499d99906-kube-api-access-qsgpl\") pod \"control-plane-machine-set-operator-78cbb6b69f-9gwb2\" (UID: \"fb0db14b-539a-489f-baea-92c499d99906\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9gwb2" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225372 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pfrk\" (UniqueName: \"kubernetes.io/projected/cf739648-f0c6-4f34-be4b-57f84579a9cb-kube-api-access-9pfrk\") pod \"package-server-manager-789f6589d5-bc6vq\" (UID: \"cf739648-f0c6-4f34-be4b-57f84579a9cb\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225392 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/faca7bed-c836-4f78-aaa9-29ec2b6db91b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-n9zvk\" (UID: \"faca7bed-c836-4f78-aaa9-29ec2b6db91b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225410 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/cf739648-f0c6-4f34-be4b-57f84579a9cb-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-bc6vq\" (UID: \"cf739648-f0c6-4f34-be4b-57f84579a9cb\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225427 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/de21f9aa-1450-423f-93f7-75b6ca444f9f-encryption-config\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225444 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-plugins-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225480 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-registration-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225521 4682 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de21f9aa-1450-423f-93f7-75b6ca444f9f-serving-cert\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225536 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/de21f9aa-1450-423f-93f7-75b6ca444f9f-audit-dir\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225553 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-mountpoint-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225585 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dd40681d-3ca3-4132-9cd0-c2a7982bdd45-apiservice-cert\") pod \"packageserver-d55dfcdfc-zwbl9\" (UID: \"dd40681d-3ca3-4132-9cd0-c2a7982bdd45\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225604 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5bfx\" (UniqueName: \"kubernetes.io/projected/677d94d3-efad-4264-88fb-cbbacbb2e267-kube-api-access-x5bfx\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225629 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/de21f9aa-1450-423f-93f7-75b6ca444f9f-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225645 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/de21f9aa-1450-423f-93f7-75b6ca444f9f-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225660 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrn5c\" (UniqueName: \"kubernetes.io/projected/b334d688-3122-4479-bfcb-37e70a059129-kube-api-access-nrn5c\") pod \"dns-operator-744455d44c-pmblg\" (UID: \"b334d688-3122-4479-bfcb-37e70a059129\") " pod="openshift-dns-operator/dns-operator-744455d44c-pmblg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225680 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/de21f9aa-1450-423f-93f7-75b6ca444f9f-audit-policies\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc 
kubenswrapper[4682]: I1210 10:47:51.225698 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225722 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/faca7bed-c836-4f78-aaa9-29ec2b6db91b-metrics-tls\") pod \"ingress-operator-5b745b69d9-n9zvk\" (UID: \"faca7bed-c836-4f78-aaa9-29ec2b6db91b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225748 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65ppq\" (UniqueName: \"kubernetes.io/projected/faca7bed-c836-4f78-aaa9-29ec2b6db91b-kube-api-access-65ppq\") pod \"ingress-operator-5b745b69d9-n9zvk\" (UID: \"faca7bed-c836-4f78-aaa9-29ec2b6db91b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225764 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-csi-data-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225769 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-plugins-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225781 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbnct\" (UniqueName: \"kubernetes.io/projected/dd40681d-3ca3-4132-9cd0-c2a7982bdd45-kube-api-access-rbnct\") pod \"packageserver-d55dfcdfc-zwbl9\" (UID: \"dd40681d-3ca3-4132-9cd0-c2a7982bdd45\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225796 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b334d688-3122-4479-bfcb-37e70a059129-metrics-tls\") pod \"dns-operator-744455d44c-pmblg\" (UID: \"b334d688-3122-4479-bfcb-37e70a059129\") " pod="openshift-dns-operator/dns-operator-744455d44c-pmblg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225850 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/de21f9aa-1450-423f-93f7-75b6ca444f9f-etcd-client\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225900 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/fb0db14b-539a-489f-baea-92c499d99906-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-9gwb2\" (UID: \"fb0db14b-539a-489f-baea-92c499d99906\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9gwb2" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225919 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-socket-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225935 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6k5x\" (UniqueName: \"kubernetes.io/projected/05316a74-fdb1-46dd-a91c-eea173459834-kube-api-access-b6k5x\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9vpx\" (UID: \"05316a74-fdb1-46dd-a91c-eea173459834\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225950 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dd40681d-3ca3-4132-9cd0-c2a7982bdd45-webhook-cert\") pod \"packageserver-d55dfcdfc-zwbl9\" (UID: \"dd40681d-3ca3-4132-9cd0-c2a7982bdd45\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225965 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/dd40681d-3ca3-4132-9cd0-c2a7982bdd45-tmpfs\") pod \"packageserver-d55dfcdfc-zwbl9\" (UID: \"dd40681d-3ca3-4132-9cd0-c2a7982bdd45\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.225989 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05316a74-fdb1-46dd-a91c-eea173459834-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9vpx\" (UID: \"05316a74-fdb1-46dd-a91c-eea173459834\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.226270 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/faca7bed-c836-4f78-aaa9-29ec2b6db91b-trusted-ca\") pod \"ingress-operator-5b745b69d9-n9zvk\" (UID: \"faca7bed-c836-4f78-aaa9-29ec2b6db91b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" Dec 10 10:47:51 crc kubenswrapper[4682]: E1210 10:47:51.227793 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.727783248 +0000 UTC m=+152.047993988 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.227857 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-registration-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.228193 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05316a74-fdb1-46dd-a91c-eea173459834-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9vpx\" (UID: \"05316a74-fdb1-46dd-a91c-eea173459834\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.228744 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/de21f9aa-1450-423f-93f7-75b6ca444f9f-audit-dir\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.228829 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-mountpoint-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.234139 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/de21f9aa-1450-423f-93f7-75b6ca444f9f-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.234932 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/dd40681d-3ca3-4132-9cd0-c2a7982bdd45-apiservice-cert\") pod \"packageserver-d55dfcdfc-zwbl9\" (UID: \"dd40681d-3ca3-4132-9cd0-c2a7982bdd45\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.235146 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-csi-data-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.235191 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/de21f9aa-1450-423f-93f7-75b6ca444f9f-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: 
\"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.235346 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/677d94d3-efad-4264-88fb-cbbacbb2e267-socket-dir\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.235530 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/dd40681d-3ca3-4132-9cd0-c2a7982bdd45-tmpfs\") pod \"packageserver-d55dfcdfc-zwbl9\" (UID: \"dd40681d-3ca3-4132-9cd0-c2a7982bdd45\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.236265 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/de21f9aa-1450-423f-93f7-75b6ca444f9f-audit-policies\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.238215 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/de21f9aa-1450-423f-93f7-75b6ca444f9f-etcd-client\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.239176 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/fb0db14b-539a-489f-baea-92c499d99906-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-9gwb2\" (UID: \"fb0db14b-539a-489f-baea-92c499d99906\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9gwb2" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.239631 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/de21f9aa-1450-423f-93f7-75b6ca444f9f-encryption-config\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.239874 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b334d688-3122-4479-bfcb-37e70a059129-metrics-tls\") pod \"dns-operator-744455d44c-pmblg\" (UID: \"b334d688-3122-4479-bfcb-37e70a059129\") " pod="openshift-dns-operator/dns-operator-744455d44c-pmblg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.243709 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/faca7bed-c836-4f78-aaa9-29ec2b6db91b-metrics-tls\") pod \"ingress-operator-5b745b69d9-n9zvk\" (UID: \"faca7bed-c836-4f78-aaa9-29ec2b6db91b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.248596 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkkw5\" (UniqueName: 
\"kubernetes.io/projected/8fe39f56-5b24-4b88-9cd6-02458b68986d-kube-api-access-qkkw5\") pod \"collect-profiles-29422725-7kvsn\" (UID: \"8fe39f56-5b24-4b88-9cd6-02458b68986d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.249439 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/dd40681d-3ca3-4132-9cd0-c2a7982bdd45-webhook-cert\") pod \"packageserver-d55dfcdfc-zwbl9\" (UID: \"dd40681d-3ca3-4132-9cd0-c2a7982bdd45\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.251752 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de21f9aa-1450-423f-93f7-75b6ca444f9f-serving-cert\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.254022 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcxcq\" (UniqueName: \"kubernetes.io/projected/a9170d46-a469-4124-9c5a-57ce54d5dfec-kube-api-access-lcxcq\") pod \"dns-default-gs4k5\" (UID: \"a9170d46-a469-4124-9c5a-57ce54d5dfec\") " pod="openshift-dns/dns-default-gs4k5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.258283 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/cf739648-f0c6-4f34-be4b-57f84579a9cb-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-bc6vq\" (UID: \"cf739648-f0c6-4f34-be4b-57f84579a9cb\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.270926 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.277563 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.277688 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.278054 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.282307 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b066d982-7235-4c40-b72c-987b213031b2-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-7hwpr\" (UID: \"b066d982-7235-4c40-b72c-987b213031b2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.296851 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkzz7\" (UniqueName: \"kubernetes.io/projected/48e39be3-4c21-47aa-86cd-ec2830784ad6-kube-api-access-lkzz7\") pod \"service-ca-9c57cc56f-vv5lr\" (UID: \"48e39be3-4c21-47aa-86cd-ec2830784ad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-vv5lr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.298077 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.308070 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqs8k\" (UniqueName: \"kubernetes.io/projected/cfbd68ba-8aec-439c-9549-9347c5e80d21-kube-api-access-lqs8k\") pod \"openshift-config-operator-7777fb866f-dbszw\" (UID: \"cfbd68ba-8aec-439c-9549-9347c5e80d21\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.326158 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6scz7\" (UniqueName: \"kubernetes.io/projected/d6434666-a341-4560-a0ff-92d26a79c668-kube-api-access-6scz7\") pod \"downloads-7954f5f757-7xtlk\" (UID: \"d6434666-a341-4560-a0ff-92d26a79c668\") " pod="openshift-console/downloads-7954f5f757-7xtlk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.330206 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.330390 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:51 crc kubenswrapper[4682]: E1210 10:47:51.330734 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.830709156 +0000 UTC m=+152.150919916 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.330925 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: E1210 10:47:51.331227 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.831211865 +0000 UTC m=+152.151422615 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4682]: W1210 10:47:51.340234 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb5c5bbff_cf34_40eb_b319_3b863d1e7776.slice/crio-0f4e54f3004e35aa360ced970f26ce1674bfa7008f20ea121d6259f42abaadf9 WatchSource:0}: Error finding container 0f4e54f3004e35aa360ced970f26ce1674bfa7008f20ea121d6259f42abaadf9: Status 404 returned error can't find the container with id 0f4e54f3004e35aa360ced970f26ce1674bfa7008f20ea121d6259f42abaadf9 Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.347444 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-7xtlk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.351213 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nnhv\" (UniqueName: \"kubernetes.io/projected/4668e8a7-5460-4e80-bc1a-7895133d6708-kube-api-access-5nnhv\") pod \"machine-config-server-ghbxg\" (UID: \"4668e8a7-5460-4e80-bc1a-7895133d6708\") " pod="openshift-machine-config-operator/machine-config-server-ghbxg" Dec 10 10:47:51 crc kubenswrapper[4682]: W1210 10:47:51.359748 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod727e26ec_b579_4b62_846e_c626fbf44f20.slice/crio-2b8e8867633fb5703c49be4b49089a04bbf17a8c90e2e037cb0014fd41cb7ec6 WatchSource:0}: Error finding container 2b8e8867633fb5703c49be4b49089a04bbf17a8c90e2e037cb0014fd41cb7ec6: Status 404 returned error can't find the container with id 2b8e8867633fb5703c49be4b49089a04bbf17a8c90e2e037cb0014fd41cb7ec6 Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.366633 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-754bh\" (UniqueName: \"kubernetes.io/projected/2727aac6-7187-49fa-afc4-f339aef7d96a-kube-api-access-754bh\") pod \"olm-operator-6b444d44fb-xm626\" (UID: \"2727aac6-7187-49fa-afc4-f339aef7d96a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.372621 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vv5lr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.385849 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.387569 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/499d5dfb-fb52-403b-9249-259a383d7562-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-ctj42\" (UID: \"499d5dfb-fb52-403b-9249-259a383d7562\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.406874 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.406940 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1aa13bce-7730-4b3e-aab0-41bfb905edf5-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jtgjc\" (UID: \"1aa13bce-7730-4b3e-aab0-41bfb905edf5\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.409376 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-5nt7b"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.420452 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.425522 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.430841 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.431532 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:51 crc kubenswrapper[4682]: E1210 10:47:51.432196 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:51.932180181 +0000 UTC m=+152.252390931 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.432742 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dpff\" (UniqueName: \"kubernetes.io/projected/df9b9c19-3321-4bcd-a43a-0f2eb32ea147-kube-api-access-8dpff\") pod \"migrator-59844c95c7-68w64\" (UID: \"df9b9c19-3321-4bcd-a43a-0f2eb32ea147\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-68w64" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.438702 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gs4k5" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.449751 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ppd6\" (UniqueName: \"kubernetes.io/projected/b066d982-7235-4c40-b72c-987b213031b2-kube-api-access-9ppd6\") pod \"cluster-image-registry-operator-dc59b4c8b-7hwpr\" (UID: \"b066d982-7235-4c40-b72c-987b213031b2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.450005 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-ghbxg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.458419 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-ncfz6" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.479076 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jn6mj\" (UniqueName: \"kubernetes.io/projected/c39ff528-9225-4c16-b25d-1b34929dadcb-kube-api-access-jn6mj\") pod \"marketplace-operator-79b997595-t9w8x\" (UID: \"c39ff528-9225-4c16-b25d-1b34929dadcb\") " pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:47:51 crc kubenswrapper[4682]: W1210 10:47:51.486070 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda40a32f4_3f8b_4397_a193_536f81131064.slice/crio-49a1237b2c5dcd4fc791bfd1b71b61d58e1c9ab8c74c9f0897f21c5f756f9ed5 WatchSource:0}: Error finding container 49a1237b2c5dcd4fc791bfd1b71b61d58e1c9ab8c74c9f0897f21c5f756f9ed5: Status 404 returned error can't find the container with id 49a1237b2c5dcd4fc791bfd1b71b61d58e1c9ab8c74c9f0897f21c5f756f9ed5 Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.509402 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-dqndv"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.517536 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6n69w\" (UniqueName: \"kubernetes.io/projected/de21f9aa-1450-423f-93f7-75b6ca444f9f-kube-api-access-6n69w\") pod \"apiserver-7bbb656c7d-fl8rc\" (UID: \"de21f9aa-1450-423f-93f7-75b6ca444f9f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.531932 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-wxvt5"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.562415 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: E1210 10:47:51.563313 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.063297366 +0000 UTC m=+152.383508116 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.571897 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.577286 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsgpl\" (UniqueName: \"kubernetes.io/projected/fb0db14b-539a-489f-baea-92c499d99906-kube-api-access-qsgpl\") pod \"control-plane-machine-set-operator-78cbb6b69f-9gwb2\" (UID: \"fb0db14b-539a-489f-baea-92c499d99906\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9gwb2" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.579657 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pfrk\" (UniqueName: \"kubernetes.io/projected/cf739648-f0c6-4f34-be4b-57f84579a9cb-kube-api-access-9pfrk\") pod \"package-server-manager-789f6589d5-bc6vq\" (UID: \"cf739648-f0c6-4f34-be4b-57f84579a9cb\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.584671 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-ftd94"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.584716 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-vl6t7"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.584730 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.604366 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.606197 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/faca7bed-c836-4f78-aaa9-29ec2b6db91b-bound-sa-token\") pod \"ingress-operator-5b745b69d9-n9zvk\" (UID: \"faca7bed-c836-4f78-aaa9-29ec2b6db91b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.607087 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5bfx\" (UniqueName: \"kubernetes.io/projected/677d94d3-efad-4264-88fb-cbbacbb2e267-kube-api-access-x5bfx\") pod \"csi-hostpathplugin-jxwjd\" (UID: \"677d94d3-efad-4264-88fb-cbbacbb2e267\") " pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.619394 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.621033 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6k5x\" (UniqueName: \"kubernetes.io/projected/05316a74-fdb1-46dd-a91c-eea173459834-kube-api-access-b6k5x\") pod \"kube-storage-version-migrator-operator-b67b599dd-w9vpx\" (UID: \"05316a74-fdb1-46dd-a91c-eea173459834\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.638860 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-68w64" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.641884 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrn5c\" (UniqueName: \"kubernetes.io/projected/b334d688-3122-4479-bfcb-37e70a059129-kube-api-access-nrn5c\") pod \"dns-operator-744455d44c-pmblg\" (UID: \"b334d688-3122-4479-bfcb-37e70a059129\") " pod="openshift-dns-operator/dns-operator-744455d44c-pmblg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.641888 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65ppq\" (UniqueName: \"kubernetes.io/projected/faca7bed-c836-4f78-aaa9-29ec2b6db91b-kube-api-access-65ppq\") pod \"ingress-operator-5b745b69d9-n9zvk\" (UID: \"faca7bed-c836-4f78-aaa9-29ec2b6db91b\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.659403 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-pmblg" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.664592 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:51 crc kubenswrapper[4682]: E1210 10:47:51.667549 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.164974987 +0000 UTC m=+152.485185737 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.668019 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbnct\" (UniqueName: \"kubernetes.io/projected/dd40681d-3ca3-4132-9cd0-c2a7982bdd45-kube-api-access-rbnct\") pod \"packageserver-d55dfcdfc-zwbl9\" (UID: \"dd40681d-3ca3-4132-9cd0-c2a7982bdd45\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.669682 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gw5v5"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.670029 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" Dec 10 10:47:51 crc kubenswrapper[4682]: W1210 10:47:51.671962 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod46274096_898e_4f5e_9765_7f4058e4e5af.slice/crio-92ccebbc21d9edc0b98d64607d9027436e5bc99a1e7ad84ba5dec9eed198d096 WatchSource:0}: Error finding container 92ccebbc21d9edc0b98d64607d9027436e5bc99a1e7ad84ba5dec9eed198d096: Status 404 returned error can't find the container with id 92ccebbc21d9edc0b98d64607d9027436e5bc99a1e7ad84ba5dec9eed198d096 Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.676301 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.695137 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.705637 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9gwb2" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.720760 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:47:51 crc kubenswrapper[4682]: W1210 10:47:51.746946 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod97d3493e_8719_4556_bb3c_b2cfd0d39f0f.slice/crio-f98537cbe3cfbba1a2898ff7c76aca850852f4a2c50ccd494e39db3283b40da5 WatchSource:0}: Error finding container f98537cbe3cfbba1a2898ff7c76aca850852f4a2c50ccd494e39db3283b40da5: Status 404 returned error can't find the container with id f98537cbe3cfbba1a2898ff7c76aca850852f4a2c50ccd494e39db3283b40da5 Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.766776 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: E1210 10:47:51.767610 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.267597395 +0000 UTC m=+152.587808145 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.781772 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.809644 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4zh9p"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.865769 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.868072 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:51 crc kubenswrapper[4682]: E1210 10:47:51.868592 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.368569261 +0000 UTC m=+152.688780011 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.885709 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.899627 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx" Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.946280 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-gs4k5"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.973380 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b"] Dec 10 10:47:51 crc kubenswrapper[4682]: I1210 10:47:51.974854 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:51 crc kubenswrapper[4682]: E1210 10:47:51.975135 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.475123832 +0000 UTC m=+152.795334582 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.077603 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:52 crc kubenswrapper[4682]: E1210 10:47:52.078239 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.578221266 +0000 UTC m=+152.898432016 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4682]: W1210 10:47:52.090954 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda9170d46_a469_4124_9c5a_57ce54d5dfec.slice/crio-950c80b494356b72bc1a8adebaecc7e18c4cdc5c0d5df4043ef61256c94ce70d WatchSource:0}: Error finding container 950c80b494356b72bc1a8adebaecc7e18c4cdc5c0d5df4043ef61256c94ce70d: Status 404 returned error can't find the container with id 950c80b494356b72bc1a8adebaecc7e18c4cdc5c0d5df4043ef61256c94ce70d Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.125999 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vv5lr"] Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.138430 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc"] Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.149924 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7xtlk"] Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.159782 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ftd94" event={"ID":"660474bf-d4be-49dc-b993-5cd3161cb575","Type":"ContainerStarted","Data":"4d8ce25b297b29e76cdfb03dcc0ed04cc37c5384a257765571ba901a1738fe9c"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.169225 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" event={"ID":"7076dac7-bf2d-4191-81f5-73b260ff0a75","Type":"ContainerStarted","Data":"a5baa24c754af8cfa79ca635389855ea90bd2c5c01ed355af500d22d3bf8ffde"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.176042 4682 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-machine-config-operator/machine-config-server-ghbxg" event={"ID":"4668e8a7-5460-4e80-bc1a-7895133d6708","Type":"ContainerStarted","Data":"8a942e8dbe0eec0dab8183ac9fcb4342d995bc267a9dbd39a1a0947b8ad6a82e"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.179013 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:52 crc kubenswrapper[4682]: E1210 10:47:52.179695 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.679683851 +0000 UTC m=+152.999894601 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.188373 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-jfqfn" event={"ID":"51fb452a-e943-4222-a52b-dbdc0f378760","Type":"ContainerStarted","Data":"520398c1dcc9e8d56e85187bae66887d1b24471c70785e9b804909b46b1d47dd"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.194022 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gs4k5" event={"ID":"a9170d46-a469-4124-9c5a-57ce54d5dfec","Type":"ContainerStarted","Data":"950c80b494356b72bc1a8adebaecc7e18c4cdc5c0d5df4043ef61256c94ce70d"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.196572 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz" event={"ID":"727e26ec-b579-4b62-846e-c626fbf44f20","Type":"ContainerStarted","Data":"01db765b580f7d8d2e2cfe6d7f94b46b7f38ac12fe31b2d170941b84c252b394"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.196613 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz" event={"ID":"727e26ec-b579-4b62-846e-c626fbf44f20","Type":"ContainerStarted","Data":"2b8e8867633fb5703c49be4b49089a04bbf17a8c90e2e037cb0014fd41cb7ec6"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.207953 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42"] Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.220985 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" event={"ID":"26599783-6b54-49f1-885a-3e87257c7063","Type":"ContainerStarted","Data":"fed1a14ff297cd16468b7e85113f8447dc5d5833e355d74869ba672c8c1fc7a7"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.221027 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" event={"ID":"26599783-6b54-49f1-885a-3e87257c7063","Type":"ContainerStarted","Data":"64101f44069e185b2d90116a21622b81ef5b25b2a84a9b48835c0f195148e60d"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.221556 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.223386 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b" event={"ID":"1cb06630-9676-44bf-9dff-1b99d98f7991","Type":"ContainerStarted","Data":"6f570b7f8a8e489a21605f8ed92161525c5f9f447a9008124e4dfc24f3f13a09"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.223864 4682 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-gcpj6 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.223909 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" podUID="26599783-6b54-49f1-885a-3e87257c7063" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.224581 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" event={"ID":"8fe39f56-5b24-4b88-9cd6-02458b68986d","Type":"ContainerStarted","Data":"31eeb1495fb59ad8f71ac32f1f8194f1ffead8fb256c5bca3005f3a0f0abc203"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.225697 4682 generic.go:334] "Generic (PLEG): container finished" podID="e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9" containerID="a38f3205b2011ee94603a877e7b6ad9dafae363c5e7d29823d244d0ed3fc4698" exitCode=0 Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.225750 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" event={"ID":"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9","Type":"ContainerDied","Data":"a38f3205b2011ee94603a877e7b6ad9dafae363c5e7d29823d244d0ed3fc4698"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.225772 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" event={"ID":"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9","Type":"ContainerStarted","Data":"debca4d4dcb625a4f10f7a67d99e57ca7d908894dd149f507b70d622a80949b7"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.241688 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" event={"ID":"9e08710b-39f2-4458-82c7-7c4cd8978787","Type":"ContainerStarted","Data":"a724cd6c10ce19b244e609fd8ed91d129c2b843a42ab9956c0809a257b5090a9"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.241932 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" event={"ID":"9e08710b-39f2-4458-82c7-7c4cd8978787","Type":"ContainerStarted","Data":"309bfc75d33d778250011508fe66aa730e0e9d3121031a909f4f3513be3cc6a7"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 
10:47:52.252703 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" event={"ID":"8696312f-d81d-442b-b80c-6938db27e66b","Type":"ContainerStarted","Data":"0a1a5d4af080f68841c39ccaeb6c5d71af8c8ed4a50a04086ba78241a16a4d9e"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.274796 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626"] Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.305004 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:52 crc kubenswrapper[4682]: E1210 10:47:52.307764 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.807742023 +0000 UTC m=+153.127952773 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.334090 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" event={"ID":"97d3493e-8719-4556-bb3c-b2cfd0d39f0f","Type":"ContainerStarted","Data":"f98537cbe3cfbba1a2898ff7c76aca850852f4a2c50ccd494e39db3283b40da5"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.340678 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-ncfz6"] Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.343013 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt" event={"ID":"fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e","Type":"ContainerStarted","Data":"53274e6b8fc32563f5e7a206e146efdf1ac6be029c47e22d3e874c7e758cb887"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.343060 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt" event={"ID":"fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e","Type":"ContainerStarted","Data":"972037a6427d6f6c80a894c3dd11c200a93a24a468aafcc39caa545a293b5290"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.351327 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" event={"ID":"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea","Type":"ContainerStarted","Data":"f2a27b7ec101116308af764944b6e6436f059c77bb63da50077bf1ffb6894c8c"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.351379 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" 
event={"ID":"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea","Type":"ContainerStarted","Data":"7557d8ad9258f019d223a83ec784c9a30437f3372f35c4ad28b9df10fea96482"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.351837 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.361495 4682 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-mp9vx container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.361546 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" event={"ID":"46274096-898e-4f5e-9765-7f4058e4e5af","Type":"ContainerStarted","Data":"92ccebbc21d9edc0b98d64607d9027436e5bc99a1e7ad84ba5dec9eed198d096"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.361578 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" podUID="978faa75-5ae1-484d-9ad6-9fc04bb7e1ea" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.375800 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" event={"ID":"a7269eba-82ff-4387-a35a-767850aa52d7","Type":"ContainerStarted","Data":"73e0b6cf9b6554aaa2ed2de09628bf814323b879c57c682460aec6ce9ffd2256"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.407439 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:52 crc kubenswrapper[4682]: E1210 10:47:52.410192 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:52.910176793 +0000 UTC m=+153.230387543 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.438431 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm" event={"ID":"b5c5bbff-cf34-40eb-b319-3b863d1e7776","Type":"ContainerStarted","Data":"0f4e54f3004e35aa360ced970f26ce1674bfa7008f20ea121d6259f42abaadf9"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.438508 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-5nt7b" event={"ID":"a40a32f4-3f8b-4397-a193-536f81131064","Type":"ContainerStarted","Data":"49a1237b2c5dcd4fc791bfd1b71b61d58e1c9ab8c74c9f0897f21c5f756f9ed5"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.438523 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5" event={"ID":"d94b4cb1-bb7b-41c0-9670-654ba1336909","Type":"ContainerStarted","Data":"64318bc4a7e76a971aec02bf699950a3cd9768359254bcec5ed4325077d0c60f"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.438535 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-vl6t7" event={"ID":"6f8743aa-53f3-40d0-8af1-3daaae9404c4","Type":"ContainerStarted","Data":"40413801d06ff830f736f8c1585390f31167b60128909a5c8c054bbfea96ced1"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.438548 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" event={"ID":"2b0c1536-0797-49f3-8f0f-de2bb4760a6b","Type":"ContainerStarted","Data":"1dc4611d4852c58a449be510e1ec05200f96b62583182985dee60d6f994798d8"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.438560 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" event={"ID":"687946c8-cb4f-4db3-85ed-31606d7a3e39","Type":"ContainerStarted","Data":"5585970c9b8597f4be085107b706853f1e156926b46ff34b2cd7be65be99b805"} Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.512571 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:52 crc kubenswrapper[4682]: E1210 10:47:52.513000 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.012976076 +0000 UTC m=+153.333186826 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.513117 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:52 crc kubenswrapper[4682]: E1210 10:47:52.514625 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.014609977 +0000 UTC m=+153.334820717 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.615951 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:52 crc kubenswrapper[4682]: E1210 10:47:52.616614 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.116597589 +0000 UTC m=+153.436808339 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.718424 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:52 crc kubenswrapper[4682]: E1210 10:47:52.719413 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.219396584 +0000 UTC m=+153.539607334 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.760131 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-dbszw"] Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.820049 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:52 crc kubenswrapper[4682]: E1210 10:47:52.820658 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.3206438 +0000 UTC m=+153.640854550 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4682]: W1210 10:47:52.855559 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcfbd68ba_8aec_439c_9549_9347c5e80d21.slice/crio-47d88898f3b19e888d4cc495669c337430a5e6393754effe74afccd4ee0d1614 WatchSource:0}: Error finding container 47d88898f3b19e888d4cc495669c337430a5e6393754effe74afccd4ee0d1614: Status 404 returned error can't find the container with id 47d88898f3b19e888d4cc495669c337430a5e6393754effe74afccd4ee0d1614 Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.857585 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc"] Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.921409 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:52 crc kubenswrapper[4682]: E1210 10:47:52.921857 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.421842033 +0000 UTC m=+153.742052783 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.932057 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.932112 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-68w64"] Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.939341 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:52 crc kubenswrapper[4682]: [-]has-synced failed: reason withheld Dec 10 10:47:52 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:47:52 crc kubenswrapper[4682]: healthz check failed Dec 10 10:47:52 crc kubenswrapper[4682]: I1210 10:47:52.940196 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.022855 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:53 crc kubenswrapper[4682]: E1210 10:47:53.023114 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.523053908 +0000 UTC m=+153.843264658 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.023297 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:53 crc kubenswrapper[4682]: E1210 10:47:53.023964 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.523952032 +0000 UTC m=+153.844162782 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.106400 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr"] Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.110417 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9"] Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.126026 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:53 crc kubenswrapper[4682]: E1210 10:47:53.126333 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.626315459 +0000 UTC m=+153.946526209 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.212551 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq"] Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.227656 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:53 crc kubenswrapper[4682]: E1210 10:47:53.228027 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.728012782 +0000 UTC m=+154.048223532 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.274215 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9gwb2"] Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.328551 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:53 crc kubenswrapper[4682]: E1210 10:47:53.329355 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.829335131 +0000 UTC m=+154.149545881 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.430653 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:53 crc kubenswrapper[4682]: E1210 10:47:53.431139 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:53.931125827 +0000 UTC m=+154.251336577 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.470722 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-pmblg"] Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.472810 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-t9w8x"] Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.511772 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" event={"ID":"de21f9aa-1450-423f-93f7-75b6ca444f9f","Type":"ContainerStarted","Data":"5ee86421e6110f7f3b7080f53899d32e09015c5dbf1749579014e8fac29594d6"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.527829 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" podStartSLOduration=132.527810915 podStartE2EDuration="2m12.527810915s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:53.527570496 +0000 UTC m=+153.847781256" watchObservedRunningTime="2025-12-10 10:47:53.527810915 +0000 UTC m=+153.848021655" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.528908 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm" podStartSLOduration=133.528888455 podStartE2EDuration="2m13.528888455s" podCreationTimestamp="2025-12-10 10:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:53.487862915 +0000 UTC m=+153.808073685" 
watchObservedRunningTime="2025-12-10 10:47:53.528888455 +0000 UTC m=+153.849099205" Dec 10 10:47:53 crc kubenswrapper[4682]: W1210 10:47:53.529214 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb334d688_3122_4479_bfcb_37e70a059129.slice/crio-671f5e57a976eef26dcbf43b39796dc198fe5ab990b5354c6b3dae634775601c WatchSource:0}: Error finding container 671f5e57a976eef26dcbf43b39796dc198fe5ab990b5354c6b3dae634775601c: Status 404 returned error can't find the container with id 671f5e57a976eef26dcbf43b39796dc198fe5ab990b5354c6b3dae634775601c Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.531336 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:53 crc kubenswrapper[4682]: E1210 10:47:53.532027 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.032007089 +0000 UTC m=+154.352217839 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.536923 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt" event={"ID":"fbf4cb28-9cf7-4496-b18c-97cfa7c39b7e","Type":"ContainerStarted","Data":"94a86eb6b784dc5cb5fd8088d0af2a27bc1479e38c74796eecf3dea40183729f"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.584396 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" event={"ID":"97d3493e-8719-4556-bb3c-b2cfd0d39f0f","Type":"ContainerStarted","Data":"40128a0d5187a5eaa67019a8a7c9bfa5be585bb9b9733638d6d77c94fcb4533f"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.588189 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.619896 4682 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-gw5v5 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body= Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.620333 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" podUID="97d3493e-8719-4556-bb3c-b2cfd0d39f0f" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.32:8443/healthz\": dial tcp 10.217.0.32:8443: connect: connection refused" Dec 10 10:47:53 crc 
kubenswrapper[4682]: I1210 10:47:53.634647 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:53 crc kubenswrapper[4682]: E1210 10:47:53.637270 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.137253723 +0000 UTC m=+154.457464533 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.655515 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vv5lr" event={"ID":"48e39be3-4c21-47aa-86cd-ec2830784ad6","Type":"ContainerStarted","Data":"b744fa01ce15e83284495a1942bdfd18cc2d74d269c9ef5d08da0f4312bb917d"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.665963 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-j2cmz" podStartSLOduration=132.665938849 podStartE2EDuration="2m12.665938849s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:53.634120878 +0000 UTC m=+153.954331658" watchObservedRunningTime="2025-12-10 10:47:53.665938849 +0000 UTC m=+153.986149599" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.667532 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" podStartSLOduration=133.667524157 podStartE2EDuration="2m13.667524157s" podCreationTimestamp="2025-12-10 10:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:53.665111318 +0000 UTC m=+153.985322078" watchObservedRunningTime="2025-12-10 10:47:53.667524157 +0000 UTC m=+153.987734907" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.676673 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-ghbxg" event={"ID":"4668e8a7-5460-4e80-bc1a-7895133d6708","Type":"ContainerStarted","Data":"20886378acecab621df5f2162ab82e41c7f1ed73e69babf6835ae0d3fe74494e"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.706104 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xpg4s" podStartSLOduration=132.706085226 podStartE2EDuration="2m12.706085226s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:53.696510314 +0000 UTC m=+154.016721074" watchObservedRunningTime="2025-12-10 10:47:53.706085226 +0000 UTC m=+154.026295976" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.706789 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx"] Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.717851 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gs4k5" event={"ID":"a9170d46-a469-4124-9c5a-57ce54d5dfec","Type":"ContainerStarted","Data":"b3760403feda13c800f4ce6b4dc36e277e2272a7b5c06cafd75ce1884c5dfa22"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.748046 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:53 crc kubenswrapper[4682]: E1210 10:47:53.750158 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.250131958 +0000 UTC m=+154.570342708 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.748190 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" podStartSLOduration=133.748166745 podStartE2EDuration="2m13.748166745s" podCreationTimestamp="2025-12-10 10:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:53.730966941 +0000 UTC m=+154.051177691" watchObservedRunningTime="2025-12-10 10:47:53.748166745 +0000 UTC m=+154.068377495" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.761400 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" event={"ID":"b066d982-7235-4c40-b72c-987b213031b2","Type":"ContainerStarted","Data":"4969ad805d7729bf84ddbbc4eb81d4637f88e9f187bb3c289e8a9ef9b2257e29"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.777356 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jxwjd"] Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.779443 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" event={"ID":"cfbd68ba-8aec-439c-9549-9347c5e80d21","Type":"ContainerStarted","Data":"47d88898f3b19e888d4cc495669c337430a5e6393754effe74afccd4ee0d1614"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.803300 4682 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-5nt7b" event={"ID":"a40a32f4-3f8b-4397-a193-536f81131064","Type":"ContainerStarted","Data":"a0ba4efaaddcbfff7c2c884a6a83670166c70939241ca35db03dfe297bb57501"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.817998 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq" event={"ID":"cf739648-f0c6-4f34-be4b-57f84579a9cb","Type":"ContainerStarted","Data":"cefb4a6a6599b64fde4d0054cb81beab6a60376e0b0094758a7b3ca0f74ab426"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.819338 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" podStartSLOduration=132.819323274 podStartE2EDuration="2m12.819323274s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:53.817674863 +0000 UTC m=+154.137885623" watchObservedRunningTime="2025-12-10 10:47:53.819323274 +0000 UTC m=+154.139534024" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.819430 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-jfqfn" podStartSLOduration=132.819425028 podStartE2EDuration="2m12.819425028s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:53.780432263 +0000 UTC m=+154.100643013" watchObservedRunningTime="2025-12-10 10:47:53.819425028 +0000 UTC m=+154.139635778" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.824581 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5" event={"ID":"d94b4cb1-bb7b-41c0-9670-654ba1336909","Type":"ContainerStarted","Data":"41de3c6716889652936a1e5d4c16c5005bf928c30b18b659efd26b0e70debfbb"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.837588 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42" event={"ID":"499d5dfb-fb52-403b-9249-259a383d7562","Type":"ContainerStarted","Data":"eab04a3b433497536aa5232e901f6950c7dc024d73e82e449fb93c6ad75a235c"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.850321 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:53 crc kubenswrapper[4682]: E1210 10:47:53.851387 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.351373033 +0000 UTC m=+154.671583783 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.851693 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk"] Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.853758 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lnqbd"] Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.855151 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-68w64" event={"ID":"df9b9c19-3321-4bcd-a43a-0f2eb32ea147","Type":"ContainerStarted","Data":"a2b4e8f92fb435fdaa9b8885aa0cb2507c09d056e36dfa01ea481fa6170c7c12"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.855258 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.861117 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.862263 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-ghbxg" podStartSLOduration=5.862248494 podStartE2EDuration="5.862248494s" podCreationTimestamp="2025-12-10 10:47:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:53.859859195 +0000 UTC m=+154.180069945" watchObservedRunningTime="2025-12-10 10:47:53.862248494 +0000 UTC m=+154.182459244" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.865556 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-vl6t7" event={"ID":"6f8743aa-53f3-40d0-8af1-3daaae9404c4","Type":"ContainerStarted","Data":"07e3ab61864dfaa1df7377f64423ce66d008e6fc8facbf6069b0e96f81d52ab5"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.865599 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.869622 4682 patch_prober.go:28] interesting pod/console-operator-58897d9998-vl6t7 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/readyz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.869696 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-vl6t7" podUID="6f8743aa-53f3-40d0-8af1-3daaae9404c4" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/readyz\": dial tcp 10.217.0.29:8443: connect: connection refused" Dec 10 10:47:53 crc kubenswrapper[4682]: W1210 10:47:53.871752 4682 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05316a74_fdb1_46dd_a91c_eea173459834.slice/crio-39bbcfc7c186eabb295c2171f3cfda8572e7c3af789a01b55d081c00ecda0818 WatchSource:0}: Error finding container 39bbcfc7c186eabb295c2171f3cfda8572e7c3af789a01b55d081c00ecda0818: Status 404 returned error can't find the container with id 39bbcfc7c186eabb295c2171f3cfda8572e7c3af789a01b55d081c00ecda0818 Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.871939 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-ncfz6" event={"ID":"e21d3967-e194-40a2-b3cd-f482e84c70e3","Type":"ContainerStarted","Data":"0562e25eb26ba6322afafdb8ed330a52f92251099a3f7694168e227b9b195626"} Dec 10 10:47:53 crc kubenswrapper[4682]: W1210 10:47:53.872362 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfaca7bed_c836_4f78_aaa9_29ec2b6db91b.slice/crio-6fa64ff8f3fa205b82105e5923842979ffa08400fceb87602633a2a9162b4f63 WatchSource:0}: Error finding container 6fa64ff8f3fa205b82105e5923842979ffa08400fceb87602633a2a9162b4f63: Status 404 returned error can't find the container with id 6fa64ff8f3fa205b82105e5923842979ffa08400fceb87602633a2a9162b4f63 Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.882034 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ftd94" event={"ID":"660474bf-d4be-49dc-b993-5cd3161cb575","Type":"ContainerStarted","Data":"a924712cee651a927bafee02277b86713cd92c01a9c84c4eb50df3127e4743b0"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.883261 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lnqbd"] Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.888999 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" event={"ID":"8fe39f56-5b24-4b88-9cd6-02458b68986d","Type":"ContainerStarted","Data":"c5781352bcfb12d8cc4dec9e9b7c7452635310d0efc18510eda644811a0c1dde"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.916909 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x84jm" event={"ID":"b5c5bbff-cf34-40eb-b319-3b863d1e7776","Type":"ContainerStarted","Data":"f3f35ed31b81a75e794d7923a1d83a237e006885758f0f5ab89c2c4c7cf8071b"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.918506 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc" event={"ID":"1aa13bce-7730-4b3e-aab0-41bfb905edf5","Type":"ContainerStarted","Data":"99af1ced58d182ae06752035c8a81e51904e34bf35dd61413ed94149d6c6f954"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.920202 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" event={"ID":"dd40681d-3ca3-4132-9cd0-c2a7982bdd45","Type":"ContainerStarted","Data":"9cfb6838ac72a8d649b3c6333da58c2469a2645434f46ae6720bc0b3e3e0bd9a"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.929581 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lfvph" event={"ID":"2b0c1536-0797-49f3-8f0f-de2bb4760a6b","Type":"ContainerStarted","Data":"8b6caf4985dbdb03fa81a602b660b62e4da827374dd5e5d095b9e5fcfc607368"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 
10:47:53.942838 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:53 crc kubenswrapper[4682]: [-]has-synced failed: reason withheld Dec 10 10:47:53 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:47:53 crc kubenswrapper[4682]: healthz check failed Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.942886 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.945147 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-2v9xt" podStartSLOduration=133.945135944 podStartE2EDuration="2m13.945135944s" podCreationTimestamp="2025-12-10 10:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:53.901423746 +0000 UTC m=+154.221634516" watchObservedRunningTime="2025-12-10 10:47:53.945135944 +0000 UTC m=+154.265346684" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.945482 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b" event={"ID":"1cb06630-9676-44bf-9dff-1b99d98f7991","Type":"ContainerStarted","Data":"f173478153e9eeb052224fa674af3333d701d266e1aba61c5762684801b1b564"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.947049 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" podStartSLOduration=132.947039024 podStartE2EDuration="2m12.947039024s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:53.9436762 +0000 UTC m=+154.263886940" watchObservedRunningTime="2025-12-10 10:47:53.947039024 +0000 UTC m=+154.267249774" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.952677 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.953091 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22bfm\" (UniqueName: \"kubernetes.io/projected/3ee34116-c378-4109-a0a2-e5ea084c98ad-kube-api-access-22bfm\") pod \"community-operators-lnqbd\" (UID: \"3ee34116-c378-4109-a0a2-e5ea084c98ad\") " pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.953190 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ee34116-c378-4109-a0a2-e5ea084c98ad-utilities\") pod \"community-operators-lnqbd\" (UID: \"3ee34116-c378-4109-a0a2-e5ea084c98ad\") " 
pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.953391 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ee34116-c378-4109-a0a2-e5ea084c98ad-catalog-content\") pod \"community-operators-lnqbd\" (UID: \"3ee34116-c378-4109-a0a2-e5ea084c98ad\") " pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:47:53 crc kubenswrapper[4682]: E1210 10:47:53.954074 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.454058913 +0000 UTC m=+154.774269663 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.966274 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" event={"ID":"2727aac6-7187-49fa-afc4-f339aef7d96a","Type":"ContainerStarted","Data":"b84e9d305d628636cb701ae6f267efe80d26045e21fd1f6ff6f0126d16db43e6"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.967089 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.975075 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" event={"ID":"8696312f-d81d-442b-b80c-6938db27e66b","Type":"ContainerStarted","Data":"31e93c7a9b1984a849bb190c95ff344e344b664dc99ac83949bec7d050193ffc"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.984072 4682 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-xm626 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.39:8443/healthz\": dial tcp 10.217.0.39:8443: connect: connection refused" start-of-body= Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.984366 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" podUID="2727aac6-7187-49fa-afc4-f339aef7d96a" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.39:8443/healthz\": dial tcp 10.217.0.39:8443: connect: connection refused" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.993990 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" event={"ID":"687946c8-cb4f-4db3-85ed-31606d7a3e39","Type":"ContainerStarted","Data":"d10e6f3a118b8b38fe1f335f1ad5ea928040b4b90ba3bbfe545a5a851f334361"} Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.994294 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" podStartSLOduration=133.994272622 
podStartE2EDuration="2m13.994272622s" podCreationTimestamp="2025-12-10 10:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:53.991149117 +0000 UTC m=+154.311359867" watchObservedRunningTime="2025-12-10 10:47:53.994272622 +0000 UTC m=+154.314483372" Dec 10 10:47:53 crc kubenswrapper[4682]: I1210 10:47:53.996143 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9gwb2" event={"ID":"fb0db14b-539a-489f-baea-92c499d99906","Type":"ContainerStarted","Data":"462c5b8f713e630fbc47a79046f378dd6976a186c703b6d1ac4dc8240963ecdb"} Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.002410 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7xtlk" event={"ID":"d6434666-a341-4560-a0ff-92d26a79c668","Type":"ContainerStarted","Data":"37f28c0dccaf55cf6b407dbdc56a36fb81556c4e812d66a033fbf8aa3ad686e1"} Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.002457 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-7xtlk" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.011424 4682 patch_prober.go:28] interesting pod/downloads-7954f5f757-7xtlk container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.011507 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7xtlk" podUID="d6434666-a341-4560-a0ff-92d26a79c668" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.014523 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-gcpj6" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.017406 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.033875 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j6tv8"] Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.034988 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.042961 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-ftd94" podStartSLOduration=133.042929603 podStartE2EDuration="2m13.042929603s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:54.030999034 +0000 UTC m=+154.351209804" watchObservedRunningTime="2025-12-10 10:47:54.042929603 +0000 UTC m=+154.363140353" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.051355 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j6tv8"] Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.054000 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7nl5\" (UniqueName: \"kubernetes.io/projected/87412cec-b4af-4f63-a127-4ba4214d57b8-kube-api-access-f7nl5\") pod \"certified-operators-j6tv8\" (UID: \"87412cec-b4af-4f63-a127-4ba4214d57b8\") " pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.054165 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.054204 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22bfm\" (UniqueName: \"kubernetes.io/projected/3ee34116-c378-4109-a0a2-e5ea084c98ad-kube-api-access-22bfm\") pod \"community-operators-lnqbd\" (UID: \"3ee34116-c378-4109-a0a2-e5ea084c98ad\") " pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.054283 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87412cec-b4af-4f63-a127-4ba4214d57b8-utilities\") pod \"certified-operators-j6tv8\" (UID: \"87412cec-b4af-4f63-a127-4ba4214d57b8\") " pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.054306 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ee34116-c378-4109-a0a2-e5ea084c98ad-utilities\") pod \"community-operators-lnqbd\" (UID: \"3ee34116-c378-4109-a0a2-e5ea084c98ad\") " pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.054449 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ee34116-c378-4109-a0a2-e5ea084c98ad-catalog-content\") pod \"community-operators-lnqbd\" (UID: \"3ee34116-c378-4109-a0a2-e5ea084c98ad\") " pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.054612 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/87412cec-b4af-4f63-a127-4ba4214d57b8-catalog-content\") pod \"certified-operators-j6tv8\" (UID: \"87412cec-b4af-4f63-a127-4ba4214d57b8\") " pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.057595 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 10 10:47:54 crc kubenswrapper[4682]: E1210 10:47:54.058037 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.558015779 +0000 UTC m=+154.878226579 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.058726 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ee34116-c378-4109-a0a2-e5ea084c98ad-utilities\") pod \"community-operators-lnqbd\" (UID: \"3ee34116-c378-4109-a0a2-e5ea084c98ad\") " pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.064757 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ee34116-c378-4109-a0a2-e5ea084c98ad-catalog-content\") pod \"community-operators-lnqbd\" (UID: \"3ee34116-c378-4109-a0a2-e5ea084c98ad\") " pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.142710 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jc5g5" podStartSLOduration=133.142689243 podStartE2EDuration="2m13.142689243s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:54.140635648 +0000 UTC m=+154.460846418" watchObservedRunningTime="2025-12-10 10:47:54.142689243 +0000 UTC m=+154.462899993" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.155678 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.155999 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87412cec-b4af-4f63-a127-4ba4214d57b8-catalog-content\") pod \"certified-operators-j6tv8\" (UID: \"87412cec-b4af-4f63-a127-4ba4214d57b8\") " pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.156054 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-f7nl5\" (UniqueName: \"kubernetes.io/projected/87412cec-b4af-4f63-a127-4ba4214d57b8-kube-api-access-f7nl5\") pod \"certified-operators-j6tv8\" (UID: \"87412cec-b4af-4f63-a127-4ba4214d57b8\") " pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.156130 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87412cec-b4af-4f63-a127-4ba4214d57b8-utilities\") pod \"certified-operators-j6tv8\" (UID: \"87412cec-b4af-4f63-a127-4ba4214d57b8\") " pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.156764 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87412cec-b4af-4f63-a127-4ba4214d57b8-utilities\") pod \"certified-operators-j6tv8\" (UID: \"87412cec-b4af-4f63-a127-4ba4214d57b8\") " pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:47:54 crc kubenswrapper[4682]: E1210 10:47:54.156928 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.656900176 +0000 UTC m=+154.977110916 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.157237 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87412cec-b4af-4f63-a127-4ba4214d57b8-catalog-content\") pod \"certified-operators-j6tv8\" (UID: \"87412cec-b4af-4f63-a127-4ba4214d57b8\") " pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.185932 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22bfm\" (UniqueName: \"kubernetes.io/projected/3ee34116-c378-4109-a0a2-e5ea084c98ad-kube-api-access-22bfm\") pod \"community-operators-lnqbd\" (UID: \"3ee34116-c378-4109-a0a2-e5ea084c98ad\") " pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.224016 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-vl6t7" podStartSLOduration=133.223997115 podStartE2EDuration="2m13.223997115s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:54.194176898 +0000 UTC m=+154.514387648" watchObservedRunningTime="2025-12-10 10:47:54.223997115 +0000 UTC m=+154.544207865" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.224681 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-skvvg"] Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.225683 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.251873 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-skvvg"] Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.259098 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81310b9c-2d81-4693-afa2-14bfa74e3bc9-utilities\") pod \"community-operators-skvvg\" (UID: \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\") " pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.259173 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.259252 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81310b9c-2d81-4693-afa2-14bfa74e3bc9-catalog-content\") pod \"community-operators-skvvg\" (UID: \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\") " pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.259274 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hb5j\" (UniqueName: \"kubernetes.io/projected/81310b9c-2d81-4693-afa2-14bfa74e3bc9-kube-api-access-6hb5j\") pod \"community-operators-skvvg\" (UID: \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\") " pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:47:54 crc kubenswrapper[4682]: E1210 10:47:54.259806 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.759784983 +0000 UTC m=+155.079995733 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.261217 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7nl5\" (UniqueName: \"kubernetes.io/projected/87412cec-b4af-4f63-a127-4ba4214d57b8-kube-api-access-f7nl5\") pod \"certified-operators-j6tv8\" (UID: \"87412cec-b4af-4f63-a127-4ba4214d57b8\") " pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.331879 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4rz5b" podStartSLOduration=133.331849605 podStartE2EDuration="2m13.331849605s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:54.285750808 +0000 UTC m=+154.605961558" watchObservedRunningTime="2025-12-10 10:47:54.331849605 +0000 UTC m=+154.652060355" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.360151 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.360385 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81310b9c-2d81-4693-afa2-14bfa74e3bc9-utilities\") pod \"community-operators-skvvg\" (UID: \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\") " pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.360490 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81310b9c-2d81-4693-afa2-14bfa74e3bc9-catalog-content\") pod \"community-operators-skvvg\" (UID: \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\") " pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.360512 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hb5j\" (UniqueName: \"kubernetes.io/projected/81310b9c-2d81-4693-afa2-14bfa74e3bc9-kube-api-access-6hb5j\") pod \"community-operators-skvvg\" (UID: \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\") " pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.361519 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81310b9c-2d81-4693-afa2-14bfa74e3bc9-catalog-content\") pod \"community-operators-skvvg\" (UID: \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\") " pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:47:54 crc kubenswrapper[4682]: E1210 10:47:54.361725 4682 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.861708413 +0000 UTC m=+155.181919163 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.361982 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81310b9c-2d81-4693-afa2-14bfa74e3bc9-utilities\") pod \"community-operators-skvvg\" (UID: \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\") " pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.371569 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-7xtlk" podStartSLOduration=133.371551196 podStartE2EDuration="2m13.371551196s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:54.329566411 +0000 UTC m=+154.649777171" watchObservedRunningTime="2025-12-10 10:47:54.371551196 +0000 UTC m=+154.691761946" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.372096 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" podStartSLOduration=133.372091745 podStartE2EDuration="2m13.372091745s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:54.369984008 +0000 UTC m=+154.690194758" watchObservedRunningTime="2025-12-10 10:47:54.372091745 +0000 UTC m=+154.692302485" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.422565 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hb5j\" (UniqueName: \"kubernetes.io/projected/81310b9c-2d81-4693-afa2-14bfa74e3bc9-kube-api-access-6hb5j\") pod \"community-operators-skvvg\" (UID: \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\") " pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.441749 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rjvnf"] Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.447900 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.461435 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" podStartSLOduration=133.461410493 podStartE2EDuration="2m13.461410493s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:54.45210498 +0000 UTC m=+154.772315730" watchObservedRunningTime="2025-12-10 10:47:54.461410493 +0000 UTC m=+154.781621243" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.464077 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.464145 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m72h6\" (UniqueName: \"kubernetes.io/projected/abde8dd6-2027-45fa-9052-e619c5cadecf-kube-api-access-m72h6\") pod \"certified-operators-rjvnf\" (UID: \"abde8dd6-2027-45fa-9052-e619c5cadecf\") " pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.464195 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abde8dd6-2027-45fa-9052-e619c5cadecf-catalog-content\") pod \"certified-operators-rjvnf\" (UID: \"abde8dd6-2027-45fa-9052-e619c5cadecf\") " pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.464241 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abde8dd6-2027-45fa-9052-e619c5cadecf-utilities\") pod \"certified-operators-rjvnf\" (UID: \"abde8dd6-2027-45fa-9052-e619c5cadecf\") " pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:47:54 crc kubenswrapper[4682]: E1210 10:47:54.464687 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:54.964663682 +0000 UTC m=+155.284874432 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.489332 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rjvnf"] Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.564889 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:54 crc kubenswrapper[4682]: E1210 10:47:54.565660 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.065616458 +0000 UTC m=+155.385827208 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.566010 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m72h6\" (UniqueName: \"kubernetes.io/projected/abde8dd6-2027-45fa-9052-e619c5cadecf-kube-api-access-m72h6\") pod \"certified-operators-rjvnf\" (UID: \"abde8dd6-2027-45fa-9052-e619c5cadecf\") " pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.566043 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abde8dd6-2027-45fa-9052-e619c5cadecf-catalog-content\") pod \"certified-operators-rjvnf\" (UID: \"abde8dd6-2027-45fa-9052-e619c5cadecf\") " pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.566074 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abde8dd6-2027-45fa-9052-e619c5cadecf-utilities\") pod \"certified-operators-rjvnf\" (UID: \"abde8dd6-2027-45fa-9052-e619c5cadecf\") " pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.566131 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:54 crc kubenswrapper[4682]: 
E1210 10:47:54.566378 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.066366526 +0000 UTC m=+155.386577276 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.566997 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abde8dd6-2027-45fa-9052-e619c5cadecf-catalog-content\") pod \"certified-operators-rjvnf\" (UID: \"abde8dd6-2027-45fa-9052-e619c5cadecf\") " pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.567198 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abde8dd6-2027-45fa-9052-e619c5cadecf-utilities\") pod \"certified-operators-rjvnf\" (UID: \"abde8dd6-2027-45fa-9052-e619c5cadecf\") " pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.629243 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m72h6\" (UniqueName: \"kubernetes.io/projected/abde8dd6-2027-45fa-9052-e619c5cadecf-kube-api-access-m72h6\") pod \"certified-operators-rjvnf\" (UID: \"abde8dd6-2027-45fa-9052-e619c5cadecf\") " pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.671288 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:54 crc kubenswrapper[4682]: E1210 10:47:54.671797 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.171779515 +0000 UTC m=+155.491990265 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.774274 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:54 crc kubenswrapper[4682]: E1210 10:47:54.774624 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.27461254 +0000 UTC m=+155.594823290 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.875976 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:54 crc kubenswrapper[4682]: E1210 10:47:54.876403 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.376387155 +0000 UTC m=+155.696597905 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.940508 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:54 crc kubenswrapper[4682]: [-]has-synced failed: reason withheld Dec 10 10:47:54 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:47:54 crc kubenswrapper[4682]: healthz check failed Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.940888 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.955488 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.961693 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:47:54 crc kubenswrapper[4682]: I1210 10:47:54.978786 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:54 crc kubenswrapper[4682]: E1210 10:47:54.979259 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.47923371 +0000 UTC m=+155.799444460 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.049010 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" event={"ID":"2727aac6-7187-49fa-afc4-f339aef7d96a","Type":"ContainerStarted","Data":"c09c58c8cd9bc991da0ce4475bc2d677b240cf1cd0b4e5b0fee8437d787d10d1"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.060812 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" event={"ID":"677d94d3-efad-4264-88fb-cbbacbb2e267","Type":"ContainerStarted","Data":"081e58383a70ce526b44f2c3ec01e34c9a3d98d9ff1ebdbeec207826eb410b70"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.080244 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:55 crc kubenswrapper[4682]: E1210 10:47:55.081281 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.581256595 +0000 UTC m=+155.901467345 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.085155 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-xm626" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.102315 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" event={"ID":"8696312f-d81d-442b-b80c-6938db27e66b","Type":"ContainerStarted","Data":"359b81ccd39d5113352e71e852ea93eaf353f49e19bbb12046ebbab80036faf3"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.134887 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" event={"ID":"dd40681d-3ca3-4132-9cd0-c2a7982bdd45","Type":"ContainerStarted","Data":"f7004347a95a8558c9386fb9d946a9bb30d3b7ead604dd93be4b701e2524d444"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.135865 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.144251 4682 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zwbl9 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:5443/healthz\": dial tcp 10.217.0.34:5443: connect: connection refused" start-of-body= Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.144306 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" podUID="dd40681d-3ca3-4132-9cd0-c2a7982bdd45" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.34:5443/healthz\": dial tcp 10.217.0.34:5443: connect: connection refused" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.160501 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" event={"ID":"7076dac7-bf2d-4191-81f5-73b260ff0a75","Type":"ContainerStarted","Data":"5f54657b631f251fa77f7b7ab37d5ecf1872197d0b05370eb3811fa27aa966a8"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.160553 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.168268 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-dqndv" podStartSLOduration=134.168248376 podStartE2EDuration="2m14.168248376s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.167231458 +0000 UTC m=+155.487442208" watchObservedRunningTime="2025-12-10 10:47:55.168248376 +0000 UTC m=+155.488459126" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.174431 4682 patch_prober.go:28] interesting 
pod/oauth-openshift-558db77b4-4zh9p container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.8:6443/healthz\": dial tcp 10.217.0.8:6443: connect: connection refused" start-of-body= Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.174533 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" podUID="7076dac7-bf2d-4191-81f5-73b260ff0a75" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.8:6443/healthz\": dial tcp 10.217.0.8:6443: connect: connection refused" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.176133 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-ncfz6" event={"ID":"e21d3967-e194-40a2-b3cd-f482e84c70e3","Type":"ContainerStarted","Data":"c266ac7dfe0fd1ba39cff09782a0bebd966b5e2e6f17ae37a7f1f4bc1d648f78"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.183058 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" event={"ID":"b066d982-7235-4c40-b72c-987b213031b2","Type":"ContainerStarted","Data":"7b5c91ea50d0f38cca534922feca6ebabeddbc945204356ac7e0fe03e6cdbb17"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.184331 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:55 crc kubenswrapper[4682]: E1210 10:47:55.184746 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.684728403 +0000 UTC m=+156.004939153 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.206414 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.207044 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42" event={"ID":"499d5dfb-fb52-403b-9249-259a383d7562","Type":"ContainerStarted","Data":"99c87cbadc5311dfc0066ae4524fd73d75048eaeb35d7e6e3b804824a60efd6d"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.225630 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-pmblg" event={"ID":"b334d688-3122-4479-bfcb-37e70a059129","Type":"ContainerStarted","Data":"671f5e57a976eef26dcbf43b39796dc198fe5ab990b5354c6b3dae634775601c"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.258496 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.272433 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-5nt7b" event={"ID":"a40a32f4-3f8b-4397-a193-536f81131064","Type":"ContainerStarted","Data":"6224a7ff5ea5baeaacb0867320ba64bdcf2c57dcd20e4c74591a0e0c0d1ea861"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.277664 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" podStartSLOduration=134.277631462 podStartE2EDuration="2m14.277631462s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.276812941 +0000 UTC m=+155.597023691" watchObservedRunningTime="2025-12-10 10:47:55.277631462 +0000 UTC m=+155.597842212" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.279111 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" podStartSLOduration=135.279097775 podStartE2EDuration="2m15.279097775s" podCreationTimestamp="2025-12-10 10:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.20611852 +0000 UTC m=+155.526329280" watchObservedRunningTime="2025-12-10 10:47:55.279097775 +0000 UTC m=+155.599308535" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.300907 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:55 crc kubenswrapper[4682]: E1210 10:47:55.301986 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.801966337 +0000 UTC m=+156.122177087 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.303727 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq" event={"ID":"cf739648-f0c6-4f34-be4b-57f84579a9cb","Type":"ContainerStarted","Data":"9732567836f3d32b0e6cccc9a5487df76dc32934f04afe92bfa3ba2567841ca1"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.351318 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vv5lr" event={"ID":"48e39be3-4c21-47aa-86cd-ec2830784ad6","Type":"ContainerStarted","Data":"ddbfa25f1dc6f8dc441ebcca63e7b9ff7a95dc358d1694db610fc27dcc7e818f"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.381573 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" event={"ID":"46274096-898e-4f5e-9765-7f4058e4e5af","Type":"ContainerStarted","Data":"27ece1cc2ed9876feafdb84812d5951f19d2405874fa51004195a26eff35542a"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.393058 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7hwpr" podStartSLOduration=134.393034499 podStartE2EDuration="2m14.393034499s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.390399242 +0000 UTC m=+155.710609992" watchObservedRunningTime="2025-12-10 10:47:55.393034499 +0000 UTC m=+155.713245249" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.393676 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-5nt7b" podStartSLOduration=134.393667673 podStartE2EDuration="2m14.393667673s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.354290803 +0000 UTC m=+155.674501553" watchObservedRunningTime="2025-12-10 10:47:55.393667673 +0000 UTC m=+155.713878423" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.406237 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:55 crc kubenswrapper[4682]: E1210 10:47:55.409193 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:55.909180313 +0000 UTC m=+156.229391063 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.413867 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-jdqwb" event={"ID":"687946c8-cb4f-4db3-85ed-31606d7a3e39","Type":"ContainerStarted","Data":"4fcfc8c51893149599a7f26a3fab81c13cde14772c5bc5f1f0c8c0b19b6f31d4"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.469828 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" event={"ID":"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9","Type":"ContainerStarted","Data":"f2410342f696dbb0d64ab14d35cca20b977e546158150789aacd53c543c7363d"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.494248 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-ncfz6" podStartSLOduration=7.494218503 podStartE2EDuration="7.494218503s" podCreationTimestamp="2025-12-10 10:47:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.444838265 +0000 UTC m=+155.765049025" watchObservedRunningTime="2025-12-10 10:47:55.494218503 +0000 UTC m=+155.814429253" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.509261 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:55 crc kubenswrapper[4682]: E1210 10:47:55.510541 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:56.010525353 +0000 UTC m=+156.330736103 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.540348 4682 generic.go:334] "Generic (PLEG): container finished" podID="cfbd68ba-8aec-439c-9549-9347c5e80d21" containerID="3bf064e5aab281bcdcb719ddbea8f22c912848e1ee0cbb2e5d5417b293271dcf" exitCode=0 Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.540458 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" event={"ID":"cfbd68ba-8aec-439c-9549-9347c5e80d21","Type":"ContainerDied","Data":"3bf064e5aab281bcdcb719ddbea8f22c912848e1ee0cbb2e5d5417b293271dcf"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.610045 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9gwb2" event={"ID":"fb0db14b-539a-489f-baea-92c499d99906","Type":"ContainerStarted","Data":"60d126b01f43902092bbd485407dbf5368a365a3dc8bc9a8ba80bdee9554f56b"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.610881 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-ctj42" podStartSLOduration=134.610862576 podStartE2EDuration="2m14.610862576s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.494016936 +0000 UTC m=+155.814227686" watchObservedRunningTime="2025-12-10 10:47:55.610862576 +0000 UTC m=+155.931073326" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.611215 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.611615 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-vv5lr" podStartSLOduration=134.611609493 podStartE2EDuration="2m14.611609493s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.610394148 +0000 UTC m=+155.930604908" watchObservedRunningTime="2025-12-10 10:47:55.611609493 +0000 UTC m=+155.931820243" Dec 10 10:47:55 crc kubenswrapper[4682]: E1210 10:47:55.613366 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:56.113354087 +0000 UTC m=+156.433564837 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.630786 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.630843 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.655528 4682 patch_prober.go:28] interesting pod/apiserver-76f77b778f-ccs9l container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.655584 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" podUID="e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.713329 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:55 crc kubenswrapper[4682]: E1210 10:47:55.728348 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:56.228308328 +0000 UTC m=+156.548519078 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.736160 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc" event={"ID":"1aa13bce-7730-4b3e-aab0-41bfb905edf5","Type":"ContainerStarted","Data":"0ac4bf5af9129b41c3de97326332628778de3e786d53ae335f4cd8f1bbbabf41"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.744239 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-wxvt5" podStartSLOduration=134.744204222 podStartE2EDuration="2m14.744204222s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.737007548 +0000 UTC m=+156.057218308" watchObservedRunningTime="2025-12-10 10:47:55.744204222 +0000 UTC m=+156.064414972" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.744393 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" podStartSLOduration=135.744380889 podStartE2EDuration="2m15.744380889s" podCreationTimestamp="2025-12-10 10:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.680404135 +0000 UTC m=+156.000614895" watchObservedRunningTime="2025-12-10 10:47:55.744380889 +0000 UTC m=+156.064591639" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.785615 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-9gwb2" podStartSLOduration=134.785584836 podStartE2EDuration="2m14.785584836s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.783974276 +0000 UTC m=+156.104185036" watchObservedRunningTime="2025-12-10 10:47:55.785584836 +0000 UTC m=+156.105795586" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.794910 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-68w64" event={"ID":"df9b9c19-3321-4bcd-a43a-0f2eb32ea147","Type":"ContainerStarted","Data":"1c7c6a37d97c4b16b40cb88a54e750060f2c0241a16b28fdd5eea279d32b1901"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.802622 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" event={"ID":"c39ff528-9225-4c16-b25d-1b34929dadcb","Type":"ContainerStarted","Data":"d382e979d0011c2522f3335a6c2587e2a253fcd79a4c608a7cbcf89815349ca7"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.802663 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" 
event={"ID":"c39ff528-9225-4c16-b25d-1b34929dadcb","Type":"ContainerStarted","Data":"8c15d96946c9d5e6944459e2840483eed07a0da73a3d4eebf59648589f997ae3"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.804901 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lnqbd"] Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.808977 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.818756 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:55 crc kubenswrapper[4682]: E1210 10:47:55.819103 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:56.319087589 +0000 UTC m=+156.639298339 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.850423 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gp62l"] Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.850690 4682 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-t9w8x container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body= Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.850740 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" podUID="c39ff528-9225-4c16-b25d-1b34929dadcb" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.877382 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" event={"ID":"faca7bed-c836-4f78-aaa9-29ec2b6db91b","Type":"ContainerStarted","Data":"6fa64ff8f3fa205b82105e5923842979ffa08400fceb87602633a2a9162b4f63"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.877513 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.886644 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 10 10:47:55 crc kubenswrapper[4682]: W1210 10:47:55.914002 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3ee34116_c378_4109_a0a2_e5ea084c98ad.slice/crio-825231ed8ff959b63dd5700ee52ed195d562f8242f050f17d739673f47e8a9fc WatchSource:0}: Error finding container 825231ed8ff959b63dd5700ee52ed195d562f8242f050f17d739673f47e8a9fc: Status 404 returned error can't find the container with id 825231ed8ff959b63dd5700ee52ed195d562f8242f050f17d739673f47e8a9fc Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.914755 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7xtlk" event={"ID":"d6434666-a341-4560-a0ff-92d26a79c668","Type":"ContainerStarted","Data":"cd7553f2041d32e39378e79cd965ca87a00fce68290ab3c6ec4e8cc292332f6d"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.916019 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jtgjc" podStartSLOduration=134.916008775 podStartE2EDuration="2m14.916008775s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:55.915174235 +0000 UTC m=+156.235384985" watchObservedRunningTime="2025-12-10 10:47:55.916008775 +0000 UTC m=+156.236219525" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.916125 4682 patch_prober.go:28] interesting pod/downloads-7954f5f757-7xtlk container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.916167 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7xtlk" podUID="d6434666-a341-4560-a0ff-92d26a79c668" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.920021 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gp62l"] Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.923557 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:55 crc kubenswrapper[4682]: E1210 10:47:55.926280 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:56.426263203 +0000 UTC m=+156.746473953 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.969163 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gs4k5" event={"ID":"a9170d46-a469-4124-9c5a-57ce54d5dfec","Type":"ContainerStarted","Data":"9c38f52de446b72ebd66bf58b92ff7b071122233d86c34eee2c4c31ad7217902"} Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.969734 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-gs4k5" Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.994154 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:55 crc kubenswrapper[4682]: [-]has-synced failed: reason withheld Dec 10 10:47:55 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:47:55 crc kubenswrapper[4682]: healthz check failed Dec 10 10:47:55 crc kubenswrapper[4682]: I1210 10:47:55.994625 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.011958 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx" event={"ID":"05316a74-fdb1-46dd-a91c-eea173459834","Type":"ContainerStarted","Data":"39bbcfc7c186eabb295c2171f3cfda8572e7c3af789a01b55d081c00ecda0818"} Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.034170 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqm7j\" (UniqueName: \"kubernetes.io/projected/06e73e24-a522-4e08-98e0-5199a83b016f-kube-api-access-xqm7j\") pod \"redhat-marketplace-gp62l\" (UID: \"06e73e24-a522-4e08-98e0-5199a83b016f\") " pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.034221 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.034256 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e73e24-a522-4e08-98e0-5199a83b016f-utilities\") pod \"redhat-marketplace-gp62l\" (UID: \"06e73e24-a522-4e08-98e0-5199a83b016f\") " pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.034297 4682 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06e73e24-a522-4e08-98e0-5199a83b016f-catalog-content\") pod \"redhat-marketplace-gp62l\" (UID: \"06e73e24-a522-4e08-98e0-5199a83b016f\") " pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:47:56 crc kubenswrapper[4682]: E1210 10:47:56.036069 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:56.536057003 +0000 UTC m=+156.856267753 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.048871 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-vl6t7" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.078306 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-68w64" podStartSLOduration=135.078287838 podStartE2EDuration="2m15.078287838s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:56.063758693 +0000 UTC m=+156.383969463" watchObservedRunningTime="2025-12-10 10:47:56.078287838 +0000 UTC m=+156.398498588" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.118641 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.140016 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.140748 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqm7j\" (UniqueName: \"kubernetes.io/projected/06e73e24-a522-4e08-98e0-5199a83b016f-kube-api-access-xqm7j\") pod \"redhat-marketplace-gp62l\" (UID: \"06e73e24-a522-4e08-98e0-5199a83b016f\") " pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.141164 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e73e24-a522-4e08-98e0-5199a83b016f-utilities\") pod \"redhat-marketplace-gp62l\" (UID: \"06e73e24-a522-4e08-98e0-5199a83b016f\") " pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.141436 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/06e73e24-a522-4e08-98e0-5199a83b016f-catalog-content\") pod \"redhat-marketplace-gp62l\" (UID: \"06e73e24-a522-4e08-98e0-5199a83b016f\") " pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:47:56 crc kubenswrapper[4682]: E1210 10:47:56.142203 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:56.642183519 +0000 UTC m=+156.962394279 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.149297 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e73e24-a522-4e08-98e0-5199a83b016f-utilities\") pod \"redhat-marketplace-gp62l\" (UID: \"06e73e24-a522-4e08-98e0-5199a83b016f\") " pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.157256 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06e73e24-a522-4e08-98e0-5199a83b016f-catalog-content\") pod \"redhat-marketplace-gp62l\" (UID: \"06e73e24-a522-4e08-98e0-5199a83b016f\") " pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.160782 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j6tv8"] Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.235816 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqm7j\" (UniqueName: \"kubernetes.io/projected/06e73e24-a522-4e08-98e0-5199a83b016f-kube-api-access-xqm7j\") pod \"redhat-marketplace-gp62l\" (UID: \"06e73e24-a522-4e08-98e0-5199a83b016f\") " pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.248721 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:56 crc kubenswrapper[4682]: E1210 10:47:56.249052 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:56.749040582 +0000 UTC m=+157.069251332 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.296674 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" podStartSLOduration=135.296653174 podStartE2EDuration="2m15.296653174s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:56.211960887 +0000 UTC m=+156.532171657" watchObservedRunningTime="2025-12-10 10:47:56.296653174 +0000 UTC m=+156.616863914" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.297598 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vv9wv"] Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.307285 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-gs4k5" podStartSLOduration=8.307269494 podStartE2EDuration="8.307269494s" podCreationTimestamp="2025-12-10 10:47:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:56.305928206 +0000 UTC m=+156.626138956" watchObservedRunningTime="2025-12-10 10:47:56.307269494 +0000 UTC m=+156.627480244" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.309730 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.313155 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.353645 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:56 crc kubenswrapper[4682]: E1210 10:47:56.353964 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:56.853949313 +0000 UTC m=+157.174160063 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.362351 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vv9wv"] Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.405318 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx" podStartSLOduration=135.405293942 podStartE2EDuration="2m15.405293942s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:56.401860695 +0000 UTC m=+156.722071465" watchObservedRunningTime="2025-12-10 10:47:56.405293942 +0000 UTC m=+156.725504692" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.459449 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-catalog-content\") pod \"redhat-marketplace-vv9wv\" (UID: \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\") " pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.459554 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-utilities\") pod \"redhat-marketplace-vv9wv\" (UID: \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\") " pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.459592 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9l2hq\" (UniqueName: \"kubernetes.io/projected/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-kube-api-access-9l2hq\") pod \"redhat-marketplace-vv9wv\" (UID: \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\") " pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.459634 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:56 crc kubenswrapper[4682]: E1210 10:47:56.459927 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:56.959914763 +0000 UTC m=+157.280125513 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.547682 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-skvvg"] Dec 10 10:47:56 crc kubenswrapper[4682]: E1210 10:47:56.561972 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.061935557 +0000 UTC m=+157.382146307 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.560974 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.563117 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9l2hq\" (UniqueName: \"kubernetes.io/projected/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-kube-api-access-9l2hq\") pod \"redhat-marketplace-vv9wv\" (UID: \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\") " pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.574441 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:56 crc kubenswrapper[4682]: E1210 10:47:56.575006 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.074981778 +0000 UTC m=+157.395192538 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.587438 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-catalog-content\") pod \"redhat-marketplace-vv9wv\" (UID: \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\") " pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.587754 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-utilities\") pod \"redhat-marketplace-vv9wv\" (UID: \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\") " pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.588280 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-utilities\") pod \"redhat-marketplace-vv9wv\" (UID: \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\") " pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.588572 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-catalog-content\") pod \"redhat-marketplace-vv9wv\" (UID: \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\") " pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.615423 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9l2hq\" (UniqueName: \"kubernetes.io/projected/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-kube-api-access-9l2hq\") pod \"redhat-marketplace-vv9wv\" (UID: \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\") " pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.676212 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.688582 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:56 crc kubenswrapper[4682]: E1210 10:47:56.688921 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.18890478 +0000 UTC m=+157.509115530 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.792027 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:56 crc kubenswrapper[4682]: E1210 10:47:56.792427 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.292411329 +0000 UTC m=+157.612622079 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.893853 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:56 crc kubenswrapper[4682]: E1210 10:47:56.894247 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.394220666 +0000 UTC m=+157.714431426 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.894502 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:56 crc kubenswrapper[4682]: E1210 10:47:56.894871 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.394828818 +0000 UTC m=+157.715039568 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.908619 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rjvnf"] Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.930663 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:56 crc kubenswrapper[4682]: [-]has-synced failed: reason withheld Dec 10 10:47:56 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:47:56 crc kubenswrapper[4682]: healthz check failed Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.930706 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:47:56 crc kubenswrapper[4682]: I1210 10:47:56.996114 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:56 crc kubenswrapper[4682]: E1210 10:47:56.998694 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.49867069 +0000 UTC m=+157.818881440 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.038010 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4jlk2"] Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.039305 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.048943 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.051363 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4jlk2"] Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.057253 4682 generic.go:334] "Generic (PLEG): container finished" podID="de21f9aa-1450-423f-93f7-75b6ca444f9f" containerID="c5f82ad5346ef132fbbef9daded6ec7c3ddcc15e497370df70f3aed669170142" exitCode=0 Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.057342 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" event={"ID":"de21f9aa-1450-423f-93f7-75b6ca444f9f","Type":"ContainerDied","Data":"c5f82ad5346ef132fbbef9daded6ec7c3ddcc15e497370df70f3aed669170142"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.090568 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" event={"ID":"e6d3e1df-0219-4883-8cf6-5f6b7dbbceb9","Type":"ContainerStarted","Data":"4ef9ae90724208c5b363fd62597d3327d42e4923bd3003345f903937de905750"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.107945 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:57 crc kubenswrapper[4682]: E1210 10:47:57.108426 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.608413609 +0000 UTC m=+157.928624359 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.122643 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" event={"ID":"cfbd68ba-8aec-439c-9549-9347c5e80d21","Type":"ContainerStarted","Data":"73667638d5b765e548e3760500979ce63e3b760e5458524dc28d0ff6832ba6f2"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.123063 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.124690 4682 generic.go:334] "Generic (PLEG): container finished" podID="3ee34116-c378-4109-a0a2-e5ea084c98ad" containerID="cb6fba4689a476c08eb86f8b4ed19f4aeaf653f3886d562c394650dffcf0caca" exitCode=0 Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.124724 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lnqbd" event={"ID":"3ee34116-c378-4109-a0a2-e5ea084c98ad","Type":"ContainerDied","Data":"cb6fba4689a476c08eb86f8b4ed19f4aeaf653f3886d562c394650dffcf0caca"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.124739 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lnqbd" event={"ID":"3ee34116-c378-4109-a0a2-e5ea084c98ad","Type":"ContainerStarted","Data":"825231ed8ff959b63dd5700ee52ed195d562f8242f050f17d739673f47e8a9fc"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.126134 4682 generic.go:334] "Generic (PLEG): container finished" podID="8fe39f56-5b24-4b88-9cd6-02458b68986d" containerID="c5781352bcfb12d8cc4dec9e9b7c7452635310d0efc18510eda644811a0c1dde" exitCode=0 Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.126168 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" event={"ID":"8fe39f56-5b24-4b88-9cd6-02458b68986d","Type":"ContainerDied","Data":"c5781352bcfb12d8cc4dec9e9b7c7452635310d0efc18510eda644811a0c1dde"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.132821 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.172797 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" event={"ID":"677d94d3-efad-4264-88fb-cbbacbb2e267","Type":"ContainerStarted","Data":"7f64ed8e251310c31b9d663663fc80265282aaf16812395cad1af26298887afc"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.213016 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.213254 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7ec82d4e-7aac-438d-ada1-ec31302939a7-utilities\") pod \"redhat-operators-4jlk2\" (UID: \"7ec82d4e-7aac-438d-ada1-ec31302939a7\") " pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.213504 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2224j\" (UniqueName: \"kubernetes.io/projected/7ec82d4e-7aac-438d-ada1-ec31302939a7-kube-api-access-2224j\") pod \"redhat-operators-4jlk2\" (UID: \"7ec82d4e-7aac-438d-ada1-ec31302939a7\") " pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.213527 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7ec82d4e-7aac-438d-ada1-ec31302939a7-catalog-content\") pod \"redhat-operators-4jlk2\" (UID: \"7ec82d4e-7aac-438d-ada1-ec31302939a7\") " pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:47:57 crc kubenswrapper[4682]: E1210 10:47:57.214175 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.71415897 +0000 UTC m=+158.034369720 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.224183 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-pmblg" event={"ID":"b334d688-3122-4479-bfcb-37e70a059129","Type":"ContainerStarted","Data":"3e0acf22b66d116fc273eaea19eefe9b909e9af14f57e1b8848dc8128e96833d"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.224236 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-pmblg" event={"ID":"b334d688-3122-4479-bfcb-37e70a059129","Type":"ContainerStarted","Data":"9759f04dedddc81423817f59237a3a81673941b60f5b9dfec19c8ee995089ad7"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.225000 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" podStartSLOduration=137.224987469 podStartE2EDuration="2m17.224987469s" podCreationTimestamp="2025-12-10 10:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:57.219218256 +0000 UTC m=+157.539429026" watchObservedRunningTime="2025-12-10 10:47:57.224987469 +0000 UTC m=+157.545198219" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.294767 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rjvnf" event={"ID":"abde8dd6-2027-45fa-9052-e619c5cadecf","Type":"ContainerStarted","Data":"1175bfe622751cdbd3f521a40b5beae305653b71136601e4cfd06adcb7464181"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 
10:47:57.315125 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7ec82d4e-7aac-438d-ada1-ec31302939a7-utilities\") pod \"redhat-operators-4jlk2\" (UID: \"7ec82d4e-7aac-438d-ada1-ec31302939a7\") " pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.315261 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.315319 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2224j\" (UniqueName: \"kubernetes.io/projected/7ec82d4e-7aac-438d-ada1-ec31302939a7-kube-api-access-2224j\") pod \"redhat-operators-4jlk2\" (UID: \"7ec82d4e-7aac-438d-ada1-ec31302939a7\") " pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.315341 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7ec82d4e-7aac-438d-ada1-ec31302939a7-catalog-content\") pod \"redhat-operators-4jlk2\" (UID: \"7ec82d4e-7aac-438d-ada1-ec31302939a7\") " pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.315865 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7ec82d4e-7aac-438d-ada1-ec31302939a7-catalog-content\") pod \"redhat-operators-4jlk2\" (UID: \"7ec82d4e-7aac-438d-ada1-ec31302939a7\") " pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.316138 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7ec82d4e-7aac-438d-ada1-ec31302939a7-utilities\") pod \"redhat-operators-4jlk2\" (UID: \"7ec82d4e-7aac-438d-ada1-ec31302939a7\") " pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:47:57 crc kubenswrapper[4682]: E1210 10:47:57.317542 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.817518244 +0000 UTC m=+158.137729024 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.338227 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" event={"ID":"faca7bed-c836-4f78-aaa9-29ec2b6db91b","Type":"ContainerStarted","Data":"abf02c59dab435fe0e9088f81a93c05e9ac5c3d42b0dce3a5b86fc0d90f0280f"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.338276 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" event={"ID":"faca7bed-c836-4f78-aaa9-29ec2b6db91b","Type":"ContainerStarted","Data":"66ea417a4c2639fa180d584ceca478269b12b6e69785719a2e5d47874aff7534"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.355464 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-pmblg" podStartSLOduration=136.35544456 podStartE2EDuration="2m16.35544456s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:57.309954796 +0000 UTC m=+157.630165556" watchObservedRunningTime="2025-12-10 10:47:57.35544456 +0000 UTC m=+157.675655310" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.376870 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2224j\" (UniqueName: \"kubernetes.io/projected/7ec82d4e-7aac-438d-ada1-ec31302939a7-kube-api-access-2224j\") pod \"redhat-operators-4jlk2\" (UID: \"7ec82d4e-7aac-438d-ada1-ec31302939a7\") " pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.379942 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-skvvg" event={"ID":"81310b9c-2d81-4693-afa2-14bfa74e3bc9","Type":"ContainerDied","Data":"4f6d682e6169a195163a355ae1c94c7bc6c54e70a883d20fb0d55f28cf3946d6"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.379933 4682 generic.go:334] "Generic (PLEG): container finished" podID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" containerID="4f6d682e6169a195163a355ae1c94c7bc6c54e70a883d20fb0d55f28cf3946d6" exitCode=0 Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.380075 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-skvvg" event={"ID":"81310b9c-2d81-4693-afa2-14bfa74e3bc9","Type":"ContainerStarted","Data":"eed0cb95d7df5d931e0110e6fb836aeefb352b809dde0fda16bdceb16a2a0dba"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.385605 4682 generic.go:334] "Generic (PLEG): container finished" podID="87412cec-b4af-4f63-a127-4ba4214d57b8" containerID="9855b10135f5e3382cd0db5865225b80cfab8be62830bc82781bd3eb1e929ad8" exitCode=0 Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.385665 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j6tv8" 
event={"ID":"87412cec-b4af-4f63-a127-4ba4214d57b8","Type":"ContainerDied","Data":"9855b10135f5e3382cd0db5865225b80cfab8be62830bc82781bd3eb1e929ad8"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.385688 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j6tv8" event={"ID":"87412cec-b4af-4f63-a127-4ba4214d57b8","Type":"ContainerStarted","Data":"12c72aee5ae09367ba3b0e9a60b86ee7af7122953ec159c4dc35688ba9962c68"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.395400 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq" event={"ID":"cf739648-f0c6-4f34-be4b-57f84579a9cb","Type":"ContainerStarted","Data":"f91d66cfc65dca1f54b1e2e4e6e0820acadcb55dc8869beb6bf6d03d64adae6f"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.395691 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.420034 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:57 crc kubenswrapper[4682]: E1210 10:47:57.421128 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:57.921109036 +0000 UTC m=+158.241319796 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.422665 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-68w64" event={"ID":"df9b9c19-3321-4bcd-a43a-0f2eb32ea147","Type":"ContainerStarted","Data":"033be9d3eb41cda4d92e1af377affd546cd104ce1ff717eb927d1252b4133771"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.423896 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.424995 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-n9zvk" podStartSLOduration=136.424971379 podStartE2EDuration="2m16.424971379s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:57.419358812 +0000 UTC m=+157.739569582" watchObservedRunningTime="2025-12-10 10:47:57.424971379 +0000 UTC m=+157.745182129" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.473825 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jxzps"] Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.483559 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.491567 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w9vpx" event={"ID":"05316a74-fdb1-46dd-a91c-eea173459834","Type":"ContainerStarted","Data":"62a3c126bf9b26fb28c1f90c00e64da65679c74096f8e9ca39758ae0a494fec6"} Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.494415 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vv9wv"] Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.496658 4682 patch_prober.go:28] interesting pod/downloads-7954f5f757-7xtlk container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.506721 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7xtlk" podUID="d6434666-a341-4560-a0ff-92d26a79c668" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.502844 4682 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-t9w8x container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body= Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.507436 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" podUID="c39ff528-9225-4c16-b25d-1b34929dadcb" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.520939 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zwbl9" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.521825 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:57 crc kubenswrapper[4682]: E1210 10:47:57.523743 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.023731424 +0000 UTC m=+158.343942174 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.531331 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jxzps"] Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.541658 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.605677 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.608333 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.618901 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.619443 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.641187 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.641961 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gq96\" (UniqueName: \"kubernetes.io/projected/2e7d49cd-f122-470b-b278-874045f4f089-kube-api-access-6gq96\") pod \"redhat-operators-jxzps\" (UID: \"2e7d49cd-f122-470b-b278-874045f4f089\") " pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.642166 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/df25cd5b-b701-45e7-852f-f88e30ed44c3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"df25cd5b-b701-45e7-852f-f88e30ed44c3\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:47:57 crc kubenswrapper[4682]: E1210 10:47:57.646538 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.146506032 +0000 UTC m=+158.466716782 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.750590 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.753361 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e7d49cd-f122-470b-b278-874045f4f089-catalog-content\") pod \"redhat-operators-jxzps\" (UID: \"2e7d49cd-f122-470b-b278-874045f4f089\") " pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.753429 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e7d49cd-f122-470b-b278-874045f4f089-utilities\") pod \"redhat-operators-jxzps\" (UID: \"2e7d49cd-f122-470b-b278-874045f4f089\") " pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.753651 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.753706 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/df25cd5b-b701-45e7-852f-f88e30ed44c3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"df25cd5b-b701-45e7-852f-f88e30ed44c3\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:47:57 crc kubenswrapper[4682]: E1210 10:47:57.759699 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.259676347 +0000 UTC m=+158.579887097 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.812565 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gp62l"] Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.830144 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq" podStartSLOduration=136.830109739 podStartE2EDuration="2m16.830109739s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:57.799951349 +0000 UTC m=+158.120162119" watchObservedRunningTime="2025-12-10 10:47:57.830109739 +0000 UTC m=+158.150320489" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.859257 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.859702 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/df25cd5b-b701-45e7-852f-f88e30ed44c3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"df25cd5b-b701-45e7-852f-f88e30ed44c3\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.859772 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e7d49cd-f122-470b-b278-874045f4f089-catalog-content\") pod \"redhat-operators-jxzps\" (UID: \"2e7d49cd-f122-470b-b278-874045f4f089\") " pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.859801 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e7d49cd-f122-470b-b278-874045f4f089-utilities\") pod \"redhat-operators-jxzps\" (UID: \"2e7d49cd-f122-470b-b278-874045f4f089\") " pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.859845 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/df25cd5b-b701-45e7-852f-f88e30ed44c3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"df25cd5b-b701-45e7-852f-f88e30ed44c3\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.859891 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gq96\" (UniqueName: \"kubernetes.io/projected/2e7d49cd-f122-470b-b278-874045f4f089-kube-api-access-6gq96\") pod \"redhat-operators-jxzps\" (UID: \"2e7d49cd-f122-470b-b278-874045f4f089\") " 
pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:47:57 crc kubenswrapper[4682]: E1210 10:47:57.860460 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.360439625 +0000 UTC m=+158.680650375 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.860517 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/df25cd5b-b701-45e7-852f-f88e30ed44c3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"df25cd5b-b701-45e7-852f-f88e30ed44c3\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.861014 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e7d49cd-f122-470b-b278-874045f4f089-catalog-content\") pod \"redhat-operators-jxzps\" (UID: \"2e7d49cd-f122-470b-b278-874045f4f089\") " pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.861193 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e7d49cd-f122-470b-b278-874045f4f089-utilities\") pod \"redhat-operators-jxzps\" (UID: \"2e7d49cd-f122-470b-b278-874045f4f089\") " pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.925762 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/df25cd5b-b701-45e7-852f-f88e30ed44c3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"df25cd5b-b701-45e7-852f-f88e30ed44c3\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.963320 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:57 crc kubenswrapper[4682]: E1210 10:47:57.963686 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.463670685 +0000 UTC m=+158.783881435 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.971369 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:57 crc kubenswrapper[4682]: [-]has-synced failed: reason withheld Dec 10 10:47:57 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:47:57 crc kubenswrapper[4682]: healthz check failed Dec 10 10:47:57 crc kubenswrapper[4682]: I1210 10:47:57.971423 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.029522 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gq96\" (UniqueName: \"kubernetes.io/projected/2e7d49cd-f122-470b-b278-874045f4f089-kube-api-access-6gq96\") pod \"redhat-operators-jxzps\" (UID: \"2e7d49cd-f122-470b-b278-874045f4f089\") " pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.065936 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:58 crc kubenswrapper[4682]: E1210 10:47:58.066354 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.566340123 +0000 UTC m=+158.886550873 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.087306 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.169600 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:58 crc kubenswrapper[4682]: E1210 10:47:58.170116 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.670100502 +0000 UTC m=+158.990311252 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.209152 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.273229 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:58 crc kubenswrapper[4682]: E1210 10:47:58.273612 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.773597571 +0000 UTC m=+159.093808311 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.374565 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:58 crc kubenswrapper[4682]: E1210 10:47:58.375066 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-12-10 10:47:58.875050695 +0000 UTC m=+159.195261445 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.468436 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4jlk2"] Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.477335 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:58 crc kubenswrapper[4682]: E1210 10:47:58.480194 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:58.978062026 +0000 UTC m=+159.298272776 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.496808 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vv9wv" event={"ID":"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9","Type":"ContainerStarted","Data":"b8c011a65900b8556f0c217029c714c88d087ea2cf07558cab45feb99d2f5755"} Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.500055 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" event={"ID":"de21f9aa-1450-423f-93f7-75b6ca444f9f","Type":"ContainerStarted","Data":"965be2d7b8527f0c26f07dfc4ceea52b6364cd0e615ae806bbd80680cd749d35"} Dec 10 10:47:58 crc kubenswrapper[4682]: W1210 10:47:58.504061 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7ec82d4e_7aac_438d_ada1_ec31302939a7.slice/crio-c1c6856bc4df669c562405ad6ee03f0bb4cacc1dc0459b6be5132d3893b11e05 WatchSource:0}: Error finding container c1c6856bc4df669c562405ad6ee03f0bb4cacc1dc0459b6be5132d3893b11e05: Status 404 returned error can't find the container with id c1c6856bc4df669c562405ad6ee03f0bb4cacc1dc0459b6be5132d3893b11e05 Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.506509 4682 generic.go:334] "Generic (PLEG): container finished" podID="abde8dd6-2027-45fa-9052-e619c5cadecf" containerID="3c7e35e482bd85d255e4030a98497847d21ed6cd3f34ed859abeb8d0c1430da4" exitCode=0 Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.506577 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-rjvnf" event={"ID":"abde8dd6-2027-45fa-9052-e619c5cadecf","Type":"ContainerDied","Data":"3c7e35e482bd85d255e4030a98497847d21ed6cd3f34ed859abeb8d0c1430da4"} Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.534149 4682 generic.go:334] "Generic (PLEG): container finished" podID="06e73e24-a522-4e08-98e0-5199a83b016f" containerID="56d6f6fd46717957850eb0e48e505254bbfbefdfbecc8bad63ffcb4956584678" exitCode=0 Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.534556 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gp62l" event={"ID":"06e73e24-a522-4e08-98e0-5199a83b016f","Type":"ContainerDied","Data":"56d6f6fd46717957850eb0e48e505254bbfbefdfbecc8bad63ffcb4956584678"} Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.534584 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gp62l" event={"ID":"06e73e24-a522-4e08-98e0-5199a83b016f","Type":"ContainerStarted","Data":"299e66b980c5e1ebf6deb53390f7daa597da1ec494e4acdea5a5061e1f891f04"} Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.535098 4682 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-t9w8x container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body= Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.535130 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" podUID="c39ff528-9225-4c16-b25d-1b34929dadcb" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.581878 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:58 crc kubenswrapper[4682]: E1210 10:47:58.583229 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.083216985 +0000 UTC m=+159.403427725 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.687976 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:58 crc kubenswrapper[4682]: E1210 10:47:58.701888 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.201831461 +0000 UTC m=+159.522042211 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.799291 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:58 crc kubenswrapper[4682]: E1210 10:47:58.799611 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.299595419 +0000 UTC m=+159.619806169 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.907259 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:58 crc kubenswrapper[4682]: E1210 10:47:58.907897 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.407873954 +0000 UTC m=+159.728084704 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.941118 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:58 crc kubenswrapper[4682]: [-]has-synced failed: reason withheld Dec 10 10:47:58 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:47:58 crc kubenswrapper[4682]: healthz check failed Dec 10 10:47:58 crc kubenswrapper[4682]: I1210 10:47:58.941185 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.013914 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:59 crc kubenswrapper[4682]: E1210 10:47:59.014833 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.51481779 +0000 UTC m=+159.835028540 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.015155 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.046148 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.120672 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:59 crc kubenswrapper[4682]: E1210 10:47:59.120946 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.620925625 +0000 UTC m=+159.941136375 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.221914 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:59 crc kubenswrapper[4682]: E1210 10:47:59.222383 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.722363088 +0000 UTC m=+160.042573838 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.323281 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:59 crc kubenswrapper[4682]: E1210 10:47:59.323448 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.823411307 +0000 UTC m=+160.143622057 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.323820 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:59 crc kubenswrapper[4682]: E1210 10:47:59.324424 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.824415273 +0000 UTC m=+160.144626023 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.367252 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.417772 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jxzps"] Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.424402 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.424483 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8fe39f56-5b24-4b88-9cd6-02458b68986d-secret-volume\") pod \"8fe39f56-5b24-4b88-9cd6-02458b68986d\" (UID: \"8fe39f56-5b24-4b88-9cd6-02458b68986d\") " Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.424518 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkkw5\" (UniqueName: \"kubernetes.io/projected/8fe39f56-5b24-4b88-9cd6-02458b68986d-kube-api-access-qkkw5\") pod \"8fe39f56-5b24-4b88-9cd6-02458b68986d\" (UID: \"8fe39f56-5b24-4b88-9cd6-02458b68986d\") " Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.424549 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8fe39f56-5b24-4b88-9cd6-02458b68986d-config-volume\") pod \"8fe39f56-5b24-4b88-9cd6-02458b68986d\" (UID: \"8fe39f56-5b24-4b88-9cd6-02458b68986d\") " Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.425700 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fe39f56-5b24-4b88-9cd6-02458b68986d-config-volume" (OuterVolumeSpecName: "config-volume") pod "8fe39f56-5b24-4b88-9cd6-02458b68986d" (UID: "8fe39f56-5b24-4b88-9cd6-02458b68986d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:47:59 crc kubenswrapper[4682]: E1210 10:47:59.426118 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:47:59.926071915 +0000 UTC m=+160.246282675 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.433610 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fe39f56-5b24-4b88-9cd6-02458b68986d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8fe39f56-5b24-4b88-9cd6-02458b68986d" (UID: "8fe39f56-5b24-4b88-9cd6-02458b68986d"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.435888 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fe39f56-5b24-4b88-9cd6-02458b68986d-kube-api-access-qkkw5" (OuterVolumeSpecName: "kube-api-access-qkkw5") pod "8fe39f56-5b24-4b88-9cd6-02458b68986d" (UID: "8fe39f56-5b24-4b88-9cd6-02458b68986d"). InnerVolumeSpecName "kube-api-access-qkkw5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.529154 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.529295 4682 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8fe39f56-5b24-4b88-9cd6-02458b68986d-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.529317 4682 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8fe39f56-5b24-4b88-9cd6-02458b68986d-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.529332 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkkw5\" (UniqueName: \"kubernetes.io/projected/8fe39f56-5b24-4b88-9cd6-02458b68986d-kube-api-access-qkkw5\") on node \"crc\" DevicePath \"\"" Dec 10 10:47:59 crc kubenswrapper[4682]: E1210 10:47:59.529609 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:00.029596685 +0000 UTC m=+160.349807435 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.546206 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"df25cd5b-b701-45e7-852f-f88e30ed44c3","Type":"ContainerStarted","Data":"5fee222ea39ba984f6a1b24aef7ed4a4efeb6169b214fdd790fa5058e64aeba6"} Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.548003 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" event={"ID":"8fe39f56-5b24-4b88-9cd6-02458b68986d","Type":"ContainerDied","Data":"31eeb1495fb59ad8f71ac32f1f8194f1ffead8fb256c5bca3005f3a0f0abc203"} Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.548042 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn" Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.548049 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="31eeb1495fb59ad8f71ac32f1f8194f1ffead8fb256c5bca3005f3a0f0abc203" Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.551138 4682 generic.go:334] "Generic (PLEG): container finished" podID="7ec82d4e-7aac-438d-ada1-ec31302939a7" containerID="0fd09a16e86537325ceb33596a176c2d7cf5617f6d69ab5cf0dad23bcdb77797" exitCode=0 Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.551211 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4jlk2" event={"ID":"7ec82d4e-7aac-438d-ada1-ec31302939a7","Type":"ContainerDied","Data":"0fd09a16e86537325ceb33596a176c2d7cf5617f6d69ab5cf0dad23bcdb77797"} Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.551237 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4jlk2" event={"ID":"7ec82d4e-7aac-438d-ada1-ec31302939a7","Type":"ContainerStarted","Data":"c1c6856bc4df669c562405ad6ee03f0bb4cacc1dc0459b6be5132d3893b11e05"} Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.555023 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" event={"ID":"677d94d3-efad-4264-88fb-cbbacbb2e267","Type":"ContainerStarted","Data":"307c726d67ffcbcf89d102d5e7f0d59487e4c255da93c2e25b44d105406eb9ed"} Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.557986 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jxzps" event={"ID":"2e7d49cd-f122-470b-b278-874045f4f089","Type":"ContainerStarted","Data":"c2ce5964360b39dc79fb3f93cdec99c66192ed729966ac53869427aed1956b9d"} Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.562775 4682 generic.go:334] "Generic (PLEG): container finished" podID="97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" containerID="067b8ed120ae34aef363cb877410067af87e5219073f7fc1fa0ae206bc7dd430" exitCode=0 Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.562916 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vv9wv" event={"ID":"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9","Type":"ContainerDied","Data":"067b8ed120ae34aef363cb877410067af87e5219073f7fc1fa0ae206bc7dd430"} Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.591681 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" podStartSLOduration=138.591660479 podStartE2EDuration="2m18.591660479s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:47:59.586925615 +0000 UTC m=+159.907136395" watchObservedRunningTime="2025-12-10 10:47:59.591660479 +0000 UTC m=+159.911871229" Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.632404 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:59 crc kubenswrapper[4682]: E1210 10:47:59.632984 4682 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:00.132960099 +0000 UTC m=+160.453170849 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.734604 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:59 crc kubenswrapper[4682]: E1210 10:47:59.738574 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:00.238555566 +0000 UTC m=+160.558766316 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.835722 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:59 crc kubenswrapper[4682]: E1210 10:47:59.835831 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:00.335810235 +0000 UTC m=+160.656020985 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.836027 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:59 crc kubenswrapper[4682]: E1210 10:47:59.836436 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:00.336425828 +0000 UTC m=+160.656636578 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.918906 4682 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.932814 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:47:59 crc kubenswrapper[4682]: [-]has-synced failed: reason withheld Dec 10 10:47:59 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:47:59 crc kubenswrapper[4682]: healthz check failed Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.932896 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.935235 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.936933 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:47:59 crc kubenswrapper[4682]: E1210 10:47:59.937260 4682 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:00.437230937 +0000 UTC m=+160.757441687 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:47:59 crc kubenswrapper[4682]: I1210 10:47:59.937599 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:47:59 crc kubenswrapper[4682]: E1210 10:47:59.938045 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:00.438033446 +0000 UTC m=+160.758244196 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.038992 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:00 crc kubenswrapper[4682]: E1210 10:48:00.040131 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:00.540111824 +0000 UTC m=+160.860322574 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.142677 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:48:00 crc kubenswrapper[4682]: E1210 10:48:00.143710 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:00.643683655 +0000 UTC m=+160.963894595 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.246329 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:00 crc kubenswrapper[4682]: E1210 10:48:00.247751 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:00.747625461 +0000 UTC m=+161.067836211 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.350068 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:48:00 crc kubenswrapper[4682]: E1210 10:48:00.354171 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:00.854016116 +0000 UTC m=+161.174226866 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.468742 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:00 crc kubenswrapper[4682]: E1210 10:48:00.469199 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:00.969183475 +0000 UTC m=+161.289394225 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.569699 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:48:00 crc kubenswrapper[4682]: E1210 10:48:00.570181 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:01.07013736 +0000 UTC m=+161.390348110 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.583344 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" event={"ID":"677d94d3-efad-4264-88fb-cbbacbb2e267","Type":"ContainerStarted","Data":"630a739357f73361167c6ce1b7c91caa25c93b06686acf40c845ef4fcbaade62"} Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.583390 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" event={"ID":"677d94d3-efad-4264-88fb-cbbacbb2e267","Type":"ContainerStarted","Data":"994d9e5bbc2554e2151eb4fe3c6643772f485b5aba6451f92a324fa4f768db1c"} Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.597707 4682 generic.go:334] "Generic (PLEG): container finished" podID="2e7d49cd-f122-470b-b278-874045f4f089" containerID="e30834d80fb1c3fe4440c1aa46c5962fae0d27cb9f87fd1f651473fd678d8a0f" exitCode=0 Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.598017 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jxzps" event={"ID":"2e7d49cd-f122-470b-b278-874045f4f089","Type":"ContainerDied","Data":"e30834d80fb1c3fe4440c1aa46c5962fae0d27cb9f87fd1f651473fd678d8a0f"} Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.605285 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-jxwjd" podStartSLOduration=12.605268493 podStartE2EDuration="12.605268493s" podCreationTimestamp="2025-12-10 10:47:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:00.603611162 +0000 UTC m=+160.923821932" watchObservedRunningTime="2025-12-10 10:48:00.605268493 +0000 UTC m=+160.925479243" Dec 10 10:48:00 crc kubenswrapper[4682]: 
I1210 10:48:00.609505 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"df25cd5b-b701-45e7-852f-f88e30ed44c3","Type":"ContainerStarted","Data":"761f6d62cb6c8e289680b552f28db2f1ec80d88170b85fb71bda4d3fa5108a40"} Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.648621 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.652825 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-ccs9l" Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.653797 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.653772898 podStartE2EDuration="3.653772898s" podCreationTimestamp="2025-12-10 10:47:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:00.648711581 +0000 UTC m=+160.968922331" watchObservedRunningTime="2025-12-10 10:48:00.653772898 +0000 UTC m=+160.973983648" Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.671222 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:00 crc kubenswrapper[4682]: E1210 10:48:00.671417 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:01.171384666 +0000 UTC m=+161.491595426 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.671968 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:48:00 crc kubenswrapper[4682]: E1210 10:48:00.674436 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:01.174422078 +0000 UTC m=+161.494632928 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.753059 4682 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-12-10T10:47:59.919315478Z","Handler":null,"Name":""} Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.773532 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:00 crc kubenswrapper[4682]: E1210 10:48:00.773682 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 10:48:01.273640909 +0000 UTC m=+161.593851659 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.774086 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:48:00 crc kubenswrapper[4682]: E1210 10:48:00.774548 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 10:48:01.274536252 +0000 UTC m=+161.594747002 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mpnmc" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.778360 4682 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.778420 4682 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.875064 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.881531 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.881753 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.883251 4682 patch_prober.go:28] interesting pod/console-f9d7485db-ftd94 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body= Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.883285 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-ftd94" podUID="660474bf-d4be-49dc-b993-5cd3161cb575" containerName="console" probeResult="failure" output="Get \"https://10.217.0.16:8443/health\": dial tcp 10.217.0.16:8443: connect: connection refused" Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.886008 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.928696 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.930657 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:00 crc kubenswrapper[4682]: [-]has-synced failed: reason withheld Dec 10 10:48:00 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:48:00 crc kubenswrapper[4682]: healthz check failed Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.930711 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.976867 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.985536 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 10 10:48:00 crc kubenswrapper[4682]: I1210 10:48:00.985577 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.062378 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mpnmc\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.341971 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.348658 4682 patch_prober.go:28] interesting pod/downloads-7954f5f757-7xtlk container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.348713 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7xtlk" podUID="d6434666-a341-4560-a0ff-92d26a79c668" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.348930 4682 patch_prober.go:28] interesting pod/downloads-7954f5f757-7xtlk container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" start-of-body= Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.349021 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7xtlk" podUID="d6434666-a341-4560-a0ff-92d26a79c668" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.33:8080/\": dial tcp 10.217.0.33:8080: connect: connection refused" Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.572633 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.572682 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.580705 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.628739 4682 generic.go:334] "Generic (PLEG): container finished" podID="df25cd5b-b701-45e7-852f-f88e30ed44c3" containerID="761f6d62cb6c8e289680b552f28db2f1ec80d88170b85fb71bda4d3fa5108a40" exitCode=0 Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.629832 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"df25cd5b-b701-45e7-852f-f88e30ed44c3","Type":"ContainerDied","Data":"761f6d62cb6c8e289680b552f28db2f1ec80d88170b85fb71bda4d3fa5108a40"} Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.641161 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-fl8rc" Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.727998 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.932624 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:01 crc kubenswrapper[4682]: [-]has-synced failed: reason withheld Dec 10 10:48:01 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:48:01 crc kubenswrapper[4682]: 
healthz check failed Dec 10 10:48:01 crc kubenswrapper[4682]: I1210 10:48:01.932688 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:02 crc kubenswrapper[4682]: I1210 10:48:02.038845 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-mpnmc"] Dec 10 10:48:02 crc kubenswrapper[4682]: W1210 10:48:02.136575 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a575832_6a51_4f80_9c12_346c7d4764f2.slice/crio-0703cd0b5fa1b628338be4c352f2588b91ffa1198d548c38da55458e58b879bf WatchSource:0}: Error finding container 0703cd0b5fa1b628338be4c352f2588b91ffa1198d548c38da55458e58b879bf: Status 404 returned error can't find the container with id 0703cd0b5fa1b628338be4c352f2588b91ffa1198d548c38da55458e58b879bf Dec 10 10:48:02 crc kubenswrapper[4682]: I1210 10:48:02.409321 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Dec 10 10:48:02 crc kubenswrapper[4682]: I1210 10:48:02.681149 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" event={"ID":"0a575832-6a51-4f80-9c12-346c7d4764f2","Type":"ContainerStarted","Data":"0703cd0b5fa1b628338be4c352f2588b91ffa1198d548c38da55458e58b879bf"} Dec 10 10:48:02 crc kubenswrapper[4682]: I1210 10:48:02.931977 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:02 crc kubenswrapper[4682]: [-]has-synced failed: reason withheld Dec 10 10:48:02 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:48:02 crc kubenswrapper[4682]: healthz check failed Dec 10 10:48:02 crc kubenswrapper[4682]: I1210 10:48:02.932035 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.001845 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.122203 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/df25cd5b-b701-45e7-852f-f88e30ed44c3-kubelet-dir\") pod \"df25cd5b-b701-45e7-852f-f88e30ed44c3\" (UID: \"df25cd5b-b701-45e7-852f-f88e30ed44c3\") " Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.122278 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/df25cd5b-b701-45e7-852f-f88e30ed44c3-kube-api-access\") pod \"df25cd5b-b701-45e7-852f-f88e30ed44c3\" (UID: \"df25cd5b-b701-45e7-852f-f88e30ed44c3\") " Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.122308 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/df25cd5b-b701-45e7-852f-f88e30ed44c3-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "df25cd5b-b701-45e7-852f-f88e30ed44c3" (UID: "df25cd5b-b701-45e7-852f-f88e30ed44c3"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.122583 4682 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/df25cd5b-b701-45e7-852f-f88e30ed44c3-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.156098 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df25cd5b-b701-45e7-852f-f88e30ed44c3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "df25cd5b-b701-45e7-852f-f88e30ed44c3" (UID: "df25cd5b-b701-45e7-852f-f88e30ed44c3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.223926 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/df25cd5b-b701-45e7-852f-f88e30ed44c3-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.443792 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-gs4k5" Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.632713 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs\") pod \"network-metrics-daemon-6c5qg\" (UID: \"f308e36d-4856-4306-adec-390e40daaee3\") " pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.649696 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f308e36d-4856-4306-adec-390e40daaee3-metrics-certs\") pod \"network-metrics-daemon-6c5qg\" (UID: \"f308e36d-4856-4306-adec-390e40daaee3\") " pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.701205 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.701410 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"df25cd5b-b701-45e7-852f-f88e30ed44c3","Type":"ContainerDied","Data":"5fee222ea39ba984f6a1b24aef7ed4a4efeb6169b214fdd790fa5058e64aeba6"} Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.701936 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5fee222ea39ba984f6a1b24aef7ed4a4efeb6169b214fdd790fa5058e64aeba6" Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.704752 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6c5qg" Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.711937 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" event={"ID":"0a575832-6a51-4f80-9c12-346c7d4764f2","Type":"ContainerStarted","Data":"22898a074661b05f90118ce18378435473ec705902cf6ae3f2e3347de54e6db6"} Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.712130 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.940948 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:03 crc kubenswrapper[4682]: [-]has-synced failed: reason withheld Dec 10 10:48:03 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:48:03 crc kubenswrapper[4682]: healthz check failed Dec 10 10:48:03 crc kubenswrapper[4682]: I1210 10:48:03.941070 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.187523 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" podStartSLOduration=143.187457737 podStartE2EDuration="2m23.187457737s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:03.736245122 +0000 UTC m=+164.056455892" watchObservedRunningTime="2025-12-10 10:48:04.187457737 +0000 UTC m=+164.507668497" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.192647 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 10 10:48:04 crc kubenswrapper[4682]: E1210 10:48:04.193113 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fe39f56-5b24-4b88-9cd6-02458b68986d" containerName="collect-profiles" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.193129 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe39f56-5b24-4b88-9cd6-02458b68986d" containerName="collect-profiles" Dec 10 10:48:04 crc kubenswrapper[4682]: E1210 10:48:04.193141 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df25cd5b-b701-45e7-852f-f88e30ed44c3" containerName="pruner" Dec 10 10:48:04 
crc kubenswrapper[4682]: I1210 10:48:04.193147 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="df25cd5b-b701-45e7-852f-f88e30ed44c3" containerName="pruner" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.193240 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fe39f56-5b24-4b88-9cd6-02458b68986d" containerName="collect-profiles" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.193250 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="df25cd5b-b701-45e7-852f-f88e30ed44c3" containerName="pruner" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.193833 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.203233 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.204235 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.205890 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.348628 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/73e0e17b-654b-4a00-a641-5b435753dfa5-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"73e0e17b-654b-4a00-a641-5b435753dfa5\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.350219 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/73e0e17b-654b-4a00-a641-5b435753dfa5-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"73e0e17b-654b-4a00-a641-5b435753dfa5\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.452168 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/73e0e17b-654b-4a00-a641-5b435753dfa5-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"73e0e17b-654b-4a00-a641-5b435753dfa5\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.452441 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/73e0e17b-654b-4a00-a641-5b435753dfa5-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"73e0e17b-654b-4a00-a641-5b435753dfa5\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.452783 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/73e0e17b-654b-4a00-a641-5b435753dfa5-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"73e0e17b-654b-4a00-a641-5b435753dfa5\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.475298 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/73e0e17b-654b-4a00-a641-5b435753dfa5-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: 
\"73e0e17b-654b-4a00-a641-5b435753dfa5\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.530456 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.545659 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-6c5qg"] Dec 10 10:48:04 crc kubenswrapper[4682]: W1210 10:48:04.640226 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf308e36d_4856_4306_adec_390e40daaee3.slice/crio-10dec901c48cd8686f1cc5f7eb3286e4b9d9365e7a0d32d68ca5d100f5ac827a WatchSource:0}: Error finding container 10dec901c48cd8686f1cc5f7eb3286e4b9d9365e7a0d32d68ca5d100f5ac827a: Status 404 returned error can't find the container with id 10dec901c48cd8686f1cc5f7eb3286e4b9d9365e7a0d32d68ca5d100f5ac827a Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.727973 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" event={"ID":"f308e36d-4856-4306-adec-390e40daaee3","Type":"ContainerStarted","Data":"10dec901c48cd8686f1cc5f7eb3286e4b9d9365e7a0d32d68ca5d100f5ac827a"} Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.910903 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.935623 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:04 crc kubenswrapper[4682]: [-]has-synced failed: reason withheld Dec 10 10:48:04 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:48:04 crc kubenswrapper[4682]: healthz check failed Dec 10 10:48:04 crc kubenswrapper[4682]: I1210 10:48:04.935711 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:05 crc kubenswrapper[4682]: W1210 10:48:05.076090 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod73e0e17b_654b_4a00_a641_5b435753dfa5.slice/crio-79b992c2c03ba9d9309bd70c9828c4675e263fb8dfb233d4d147ca54270e3be4 WatchSource:0}: Error finding container 79b992c2c03ba9d9309bd70c9828c4675e263fb8dfb233d4d147ca54270e3be4: Status 404 returned error can't find the container with id 79b992c2c03ba9d9309bd70c9828c4675e263fb8dfb233d4d147ca54270e3be4 Dec 10 10:48:05 crc kubenswrapper[4682]: I1210 10:48:05.761347 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"73e0e17b-654b-4a00-a641-5b435753dfa5","Type":"ContainerStarted","Data":"79b992c2c03ba9d9309bd70c9828c4675e263fb8dfb233d4d147ca54270e3be4"} Dec 10 10:48:05 crc kubenswrapper[4682]: I1210 10:48:05.932039 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:05 crc kubenswrapper[4682]: [-]has-synced failed: reason withheld Dec 10 
10:48:05 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:48:05 crc kubenswrapper[4682]: healthz check failed Dec 10 10:48:05 crc kubenswrapper[4682]: I1210 10:48:05.932116 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:06 crc kubenswrapper[4682]: I1210 10:48:06.478606 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:48:06 crc kubenswrapper[4682]: I1210 10:48:06.478702 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:48:06 crc kubenswrapper[4682]: I1210 10:48:06.775324 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"73e0e17b-654b-4a00-a641-5b435753dfa5","Type":"ContainerStarted","Data":"87c07965de2ac5e612d99e1b69bc1b92ae0cb3655558c7c5d7903917f77d5119"} Dec 10 10:48:06 crc kubenswrapper[4682]: I1210 10:48:06.776790 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" event={"ID":"f308e36d-4856-4306-adec-390e40daaee3","Type":"ContainerStarted","Data":"8e6e19b62f09f4aa946527604738d2eb30a07c1411e2868c850a9f4dfff606d5"} Dec 10 10:48:06 crc kubenswrapper[4682]: I1210 10:48:06.792061 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.792045283 podStartE2EDuration="2.792045283s" podCreationTimestamp="2025-12-10 10:48:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:06.790291309 +0000 UTC m=+167.110502059" watchObservedRunningTime="2025-12-10 10:48:06.792045283 +0000 UTC m=+167.112256033" Dec 10 10:48:06 crc kubenswrapper[4682]: I1210 10:48:06.932426 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 10:48:06 crc kubenswrapper[4682]: [+]has-synced ok Dec 10 10:48:06 crc kubenswrapper[4682]: [+]process-running ok Dec 10 10:48:06 crc kubenswrapper[4682]: healthz check failed Dec 10 10:48:06 crc kubenswrapper[4682]: I1210 10:48:06.932499 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 10:48:07 crc kubenswrapper[4682]: I1210 10:48:07.785648 4682 generic.go:334] "Generic (PLEG): container finished" podID="73e0e17b-654b-4a00-a641-5b435753dfa5" containerID="87c07965de2ac5e612d99e1b69bc1b92ae0cb3655558c7c5d7903917f77d5119" exitCode=0 Dec 10 10:48:07 crc kubenswrapper[4682]: I1210 10:48:07.785687 4682 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"73e0e17b-654b-4a00-a641-5b435753dfa5","Type":"ContainerDied","Data":"87c07965de2ac5e612d99e1b69bc1b92ae0cb3655558c7c5d7903917f77d5119"} Dec 10 10:48:07 crc kubenswrapper[4682]: I1210 10:48:07.932568 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:48:07 crc kubenswrapper[4682]: I1210 10:48:07.935389 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-jfqfn" Dec 10 10:48:10 crc kubenswrapper[4682]: I1210 10:48:10.997357 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:48:11 crc kubenswrapper[4682]: I1210 10:48:11.001580 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-ftd94" Dec 10 10:48:11 crc kubenswrapper[4682]: I1210 10:48:11.353245 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-7xtlk" Dec 10 10:48:12 crc kubenswrapper[4682]: I1210 10:48:12.272219 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:12 crc kubenswrapper[4682]: I1210 10:48:12.379218 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/73e0e17b-654b-4a00-a641-5b435753dfa5-kube-api-access\") pod \"73e0e17b-654b-4a00-a641-5b435753dfa5\" (UID: \"73e0e17b-654b-4a00-a641-5b435753dfa5\") " Dec 10 10:48:12 crc kubenswrapper[4682]: I1210 10:48:12.379384 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/73e0e17b-654b-4a00-a641-5b435753dfa5-kubelet-dir\") pod \"73e0e17b-654b-4a00-a641-5b435753dfa5\" (UID: \"73e0e17b-654b-4a00-a641-5b435753dfa5\") " Dec 10 10:48:12 crc kubenswrapper[4682]: I1210 10:48:12.379558 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/73e0e17b-654b-4a00-a641-5b435753dfa5-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "73e0e17b-654b-4a00-a641-5b435753dfa5" (UID: "73e0e17b-654b-4a00-a641-5b435753dfa5"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:48:12 crc kubenswrapper[4682]: I1210 10:48:12.387554 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73e0e17b-654b-4a00-a641-5b435753dfa5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "73e0e17b-654b-4a00-a641-5b435753dfa5" (UID: "73e0e17b-654b-4a00-a641-5b435753dfa5"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:48:12 crc kubenswrapper[4682]: I1210 10:48:12.480412 4682 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/73e0e17b-654b-4a00-a641-5b435753dfa5-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:48:12 crc kubenswrapper[4682]: I1210 10:48:12.480451 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/73e0e17b-654b-4a00-a641-5b435753dfa5-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:48:12 crc kubenswrapper[4682]: I1210 10:48:12.823300 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"73e0e17b-654b-4a00-a641-5b435753dfa5","Type":"ContainerDied","Data":"79b992c2c03ba9d9309bd70c9828c4675e263fb8dfb233d4d147ca54270e3be4"} Dec 10 10:48:12 crc kubenswrapper[4682]: I1210 10:48:12.823341 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79b992c2c03ba9d9309bd70c9828c4675e263fb8dfb233d4d147ca54270e3be4" Dec 10 10:48:12 crc kubenswrapper[4682]: I1210 10:48:12.823352 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 10:48:21 crc kubenswrapper[4682]: I1210 10:48:21.350635 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:48:27 crc kubenswrapper[4682]: I1210 10:48:27.712667 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 10:48:31 crc kubenswrapper[4682]: I1210 10:48:31.650733 4682 patch_prober.go:28] interesting pod/authentication-operator-69f744f599-ghhnn container/authentication-operator namespace/openshift-authentication-operator: Liveness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 10:48:31 crc kubenswrapper[4682]: I1210 10:48:31.650849 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication-operator/authentication-operator-69f744f599-ghhnn" podUID="a7269eba-82ff-4387-a35a-767850aa52d7" containerName="authentication-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 10:48:31 crc kubenswrapper[4682]: I1210 10:48:31.703151 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bc6vq" Dec 10 10:48:34 crc kubenswrapper[4682]: E1210 10:48:34.432105 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Dec 10 10:48:34 crc kubenswrapper[4682]: E1210 10:48:34.432596 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2224j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-4jlk2_openshift-marketplace(7ec82d4e-7aac-438d-ada1-ec31302939a7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:48:34 crc kubenswrapper[4682]: E1210 10:48:34.434046 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-4jlk2" podUID="7ec82d4e-7aac-438d-ada1-ec31302939a7" Dec 10 10:48:35 crc kubenswrapper[4682]: I1210 10:48:35.987460 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 10 10:48:35 crc kubenswrapper[4682]: E1210 10:48:35.988306 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73e0e17b-654b-4a00-a641-5b435753dfa5" containerName="pruner" Dec 10 10:48:35 crc kubenswrapper[4682]: I1210 10:48:35.988328 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="73e0e17b-654b-4a00-a641-5b435753dfa5" containerName="pruner" Dec 10 10:48:35 crc kubenswrapper[4682]: I1210 10:48:35.988434 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="73e0e17b-654b-4a00-a641-5b435753dfa5" containerName="pruner" Dec 10 10:48:35 crc kubenswrapper[4682]: I1210 10:48:35.988834 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:35 crc kubenswrapper[4682]: I1210 10:48:35.991798 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Dec 10 10:48:35 crc kubenswrapper[4682]: I1210 10:48:35.992899 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Dec 10 10:48:35 crc kubenswrapper[4682]: I1210 10:48:35.993672 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 10 10:48:36 crc kubenswrapper[4682]: I1210 10:48:36.106496 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e9c0a192-28a7-434f-bc5c-c4680e687a76-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e9c0a192-28a7-434f-bc5c-c4680e687a76\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:36 crc kubenswrapper[4682]: I1210 10:48:36.106586 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e9c0a192-28a7-434f-bc5c-c4680e687a76-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e9c0a192-28a7-434f-bc5c-c4680e687a76\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:36 crc kubenswrapper[4682]: I1210 10:48:36.208115 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e9c0a192-28a7-434f-bc5c-c4680e687a76-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e9c0a192-28a7-434f-bc5c-c4680e687a76\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:36 crc kubenswrapper[4682]: I1210 10:48:36.208608 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e9c0a192-28a7-434f-bc5c-c4680e687a76-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e9c0a192-28a7-434f-bc5c-c4680e687a76\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:36 crc kubenswrapper[4682]: I1210 10:48:36.208756 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e9c0a192-28a7-434f-bc5c-c4680e687a76-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e9c0a192-28a7-434f-bc5c-c4680e687a76\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:36 crc kubenswrapper[4682]: I1210 10:48:36.246312 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e9c0a192-28a7-434f-bc5c-c4680e687a76-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e9c0a192-28a7-434f-bc5c-c4680e687a76\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:36 crc kubenswrapper[4682]: I1210 10:48:36.311580 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:36 crc kubenswrapper[4682]: I1210 10:48:36.478608 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:48:36 crc kubenswrapper[4682]: I1210 10:48:36.478672 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:48:37 crc kubenswrapper[4682]: E1210 10:48:37.685523 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-4jlk2" podUID="7ec82d4e-7aac-438d-ada1-ec31302939a7" Dec 10 10:48:37 crc kubenswrapper[4682]: E1210 10:48:37.913134 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Dec 10 10:48:37 crc kubenswrapper[4682]: E1210 10:48:37.913387 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-22bfm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-lnqbd_openshift-marketplace(3ee34116-c378-4109-a0a2-e5ea084c98ad): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:48:37 crc kubenswrapper[4682]: E1210 10:48:37.914682 4682 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-lnqbd" podUID="3ee34116-c378-4109-a0a2-e5ea084c98ad" Dec 10 10:48:40 crc kubenswrapper[4682]: E1210 10:48:40.384438 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-lnqbd" podUID="3ee34116-c378-4109-a0a2-e5ea084c98ad" Dec 10 10:48:41 crc kubenswrapper[4682]: I1210 10:48:41.570839 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 10 10:48:41 crc kubenswrapper[4682]: I1210 10:48:41.572000 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:48:41 crc kubenswrapper[4682]: I1210 10:48:41.583969 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 10 10:48:41 crc kubenswrapper[4682]: I1210 10:48:41.680034 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d666f615-9508-4824-830c-4b56aec338c0-kubelet-dir\") pod \"installer-9-crc\" (UID: \"d666f615-9508-4824-830c-4b56aec338c0\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:48:41 crc kubenswrapper[4682]: I1210 10:48:41.680145 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d666f615-9508-4824-830c-4b56aec338c0-var-lock\") pod \"installer-9-crc\" (UID: \"d666f615-9508-4824-830c-4b56aec338c0\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:48:41 crc kubenswrapper[4682]: I1210 10:48:41.680260 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d666f615-9508-4824-830c-4b56aec338c0-kube-api-access\") pod \"installer-9-crc\" (UID: \"d666f615-9508-4824-830c-4b56aec338c0\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:48:41 crc kubenswrapper[4682]: I1210 10:48:41.782446 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d666f615-9508-4824-830c-4b56aec338c0-kubelet-dir\") pod \"installer-9-crc\" (UID: \"d666f615-9508-4824-830c-4b56aec338c0\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:48:41 crc kubenswrapper[4682]: I1210 10:48:41.782562 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d666f615-9508-4824-830c-4b56aec338c0-var-lock\") pod \"installer-9-crc\" (UID: \"d666f615-9508-4824-830c-4b56aec338c0\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:48:41 crc kubenswrapper[4682]: I1210 10:48:41.782567 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d666f615-9508-4824-830c-4b56aec338c0-kubelet-dir\") pod \"installer-9-crc\" (UID: \"d666f615-9508-4824-830c-4b56aec338c0\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:48:41 crc kubenswrapper[4682]: I1210 10:48:41.782663 4682 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d666f615-9508-4824-830c-4b56aec338c0-kube-api-access\") pod \"installer-9-crc\" (UID: \"d666f615-9508-4824-830c-4b56aec338c0\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:48:41 crc kubenswrapper[4682]: I1210 10:48:41.782710 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d666f615-9508-4824-830c-4b56aec338c0-var-lock\") pod \"installer-9-crc\" (UID: \"d666f615-9508-4824-830c-4b56aec338c0\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:48:41 crc kubenswrapper[4682]: I1210 10:48:41.811274 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d666f615-9508-4824-830c-4b56aec338c0-kube-api-access\") pod \"installer-9-crc\" (UID: \"d666f615-9508-4824-830c-4b56aec338c0\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:48:41 crc kubenswrapper[4682]: I1210 10:48:41.891410 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:48:43 crc kubenswrapper[4682]: E1210 10:48:43.882201 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 10 10:48:43 crc kubenswrapper[4682]: E1210 10:48:43.882669 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m72h6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-rjvnf_openshift-marketplace(abde8dd6-2027-45fa-9052-e619c5cadecf): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:48:43 crc kubenswrapper[4682]: E1210 10:48:43.883830 4682 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-rjvnf" podUID="abde8dd6-2027-45fa-9052-e619c5cadecf" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.467175 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-rjvnf" podUID="abde8dd6-2027-45fa-9052-e619c5cadecf" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.552126 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.552423 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9l2hq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-vv9wv_openshift-marketplace(97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.553709 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-vv9wv" podUID="97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.569659 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" 
image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.569849 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.570526 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6hb5j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-skvvg_openshift-marketplace(81310b9c-2d81-4693-afa2-14bfa74e3bc9): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.570670 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6gq96,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-jxzps_openshift-marketplace(2e7d49cd-f122-470b-b278-874045f4f089): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.571697 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-skvvg" podUID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.575071 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-jxzps" podUID="2e7d49cd-f122-470b-b278-874045f4f089" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.612509 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.612662 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f7nl5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-j6tv8_openshift-marketplace(87412cec-b4af-4f63-a127-4ba4214d57b8): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.613846 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-j6tv8" podUID="87412cec-b4af-4f63-a127-4ba4214d57b8" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.623692 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.623841 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xqm7j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-gp62l_openshift-marketplace(06e73e24-a522-4e08-98e0-5199a83b016f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:48:46 crc kubenswrapper[4682]: E1210 10:48:46.625065 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-gp62l" podUID="06e73e24-a522-4e08-98e0-5199a83b016f" Dec 10 10:48:46 crc kubenswrapper[4682]: I1210 10:48:46.935263 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 10 10:48:46 crc kubenswrapper[4682]: I1210 10:48:46.981455 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 10 10:48:46 crc kubenswrapper[4682]: W1210 10:48:46.992433 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pode9c0a192_28a7_434f_bc5c_c4680e687a76.slice/crio-db23cbd1a41671452784960dbaa92a3c8fd674ccd653d7221a5310fb3ef498b4 WatchSource:0}: Error finding container db23cbd1a41671452784960dbaa92a3c8fd674ccd653d7221a5310fb3ef498b4: Status 404 returned error can't find the container with id db23cbd1a41671452784960dbaa92a3c8fd674ccd653d7221a5310fb3ef498b4 Dec 10 10:48:47 crc kubenswrapper[4682]: I1210 10:48:47.042268 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-6c5qg" event={"ID":"f308e36d-4856-4306-adec-390e40daaee3","Type":"ContainerStarted","Data":"37ffae2c8bcce5151eafe7b05d0e2cd7e532465fb5aead459ba129668a17c609"} Dec 10 10:48:47 crc kubenswrapper[4682]: I1210 10:48:47.053056 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d666f615-9508-4824-830c-4b56aec338c0","Type":"ContainerStarted","Data":"b2c26f77ee0f92f9003b95ba89aaf9d63ab7a0121938109f4d8766e73c33e1a8"} Dec 10 10:48:47 crc kubenswrapper[4682]: I1210 10:48:47.054535 4682 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e9c0a192-28a7-434f-bc5c-c4680e687a76","Type":"ContainerStarted","Data":"db23cbd1a41671452784960dbaa92a3c8fd674ccd653d7221a5310fb3ef498b4"} Dec 10 10:48:47 crc kubenswrapper[4682]: E1210 10:48:47.056325 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-vv9wv" podUID="97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" Dec 10 10:48:47 crc kubenswrapper[4682]: E1210 10:48:47.057576 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-skvvg" podUID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" Dec 10 10:48:47 crc kubenswrapper[4682]: E1210 10:48:47.057918 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-jxzps" podUID="2e7d49cd-f122-470b-b278-874045f4f089" Dec 10 10:48:47 crc kubenswrapper[4682]: E1210 10:48:47.057974 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-j6tv8" podUID="87412cec-b4af-4f63-a127-4ba4214d57b8" Dec 10 10:48:47 crc kubenswrapper[4682]: I1210 10:48:47.089096 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-6c5qg" podStartSLOduration=186.089074358 podStartE2EDuration="3m6.089074358s" podCreationTimestamp="2025-12-10 10:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:47.065815325 +0000 UTC m=+207.386026105" watchObservedRunningTime="2025-12-10 10:48:47.089074358 +0000 UTC m=+207.409285108" Dec 10 10:48:48 crc kubenswrapper[4682]: I1210 10:48:48.060836 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d666f615-9508-4824-830c-4b56aec338c0","Type":"ContainerStarted","Data":"c61ea1478c9bf6a848f45e778e7f1d79309db2817008e5e38adef419455e367a"} Dec 10 10:48:48 crc kubenswrapper[4682]: I1210 10:48:48.062676 4682 generic.go:334] "Generic (PLEG): container finished" podID="e9c0a192-28a7-434f-bc5c-c4680e687a76" containerID="ee58cf9a96011166bb31d1505e3372a9dec5c8a59707266500c72a7e504787e1" exitCode=0 Dec 10 10:48:48 crc kubenswrapper[4682]: I1210 10:48:48.062718 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e9c0a192-28a7-434f-bc5c-c4680e687a76","Type":"ContainerDied","Data":"ee58cf9a96011166bb31d1505e3372a9dec5c8a59707266500c72a7e504787e1"} Dec 10 10:48:48 crc kubenswrapper[4682]: I1210 10:48:48.075573 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=7.07555276 podStartE2EDuration="7.07555276s" podCreationTimestamp="2025-12-10 10:48:41 +0000 
UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:48:48.072964025 +0000 UTC m=+208.393174775" watchObservedRunningTime="2025-12-10 10:48:48.07555276 +0000 UTC m=+208.395763510" Dec 10 10:48:49 crc kubenswrapper[4682]: I1210 10:48:49.070012 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4jlk2" event={"ID":"7ec82d4e-7aac-438d-ada1-ec31302939a7","Type":"ContainerStarted","Data":"d0733ec6cd51d18dd82306854cf92baf4ecf46c65de97d3ff1a89c8a0e2677a9"} Dec 10 10:48:49 crc kubenswrapper[4682]: I1210 10:48:49.327389 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:49 crc kubenswrapper[4682]: I1210 10:48:49.415560 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e9c0a192-28a7-434f-bc5c-c4680e687a76-kube-api-access\") pod \"e9c0a192-28a7-434f-bc5c-c4680e687a76\" (UID: \"e9c0a192-28a7-434f-bc5c-c4680e687a76\") " Dec 10 10:48:49 crc kubenswrapper[4682]: I1210 10:48:49.415735 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e9c0a192-28a7-434f-bc5c-c4680e687a76-kubelet-dir\") pod \"e9c0a192-28a7-434f-bc5c-c4680e687a76\" (UID: \"e9c0a192-28a7-434f-bc5c-c4680e687a76\") " Dec 10 10:48:49 crc kubenswrapper[4682]: I1210 10:48:49.415878 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e9c0a192-28a7-434f-bc5c-c4680e687a76-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "e9c0a192-28a7-434f-bc5c-c4680e687a76" (UID: "e9c0a192-28a7-434f-bc5c-c4680e687a76"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:48:49 crc kubenswrapper[4682]: I1210 10:48:49.416139 4682 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e9c0a192-28a7-434f-bc5c-c4680e687a76-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:48:49 crc kubenswrapper[4682]: I1210 10:48:49.423553 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9c0a192-28a7-434f-bc5c-c4680e687a76-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e9c0a192-28a7-434f-bc5c-c4680e687a76" (UID: "e9c0a192-28a7-434f-bc5c-c4680e687a76"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:48:49 crc kubenswrapper[4682]: I1210 10:48:49.516746 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e9c0a192-28a7-434f-bc5c-c4680e687a76-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:48:50 crc kubenswrapper[4682]: I1210 10:48:50.079123 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e9c0a192-28a7-434f-bc5c-c4680e687a76","Type":"ContainerDied","Data":"db23cbd1a41671452784960dbaa92a3c8fd674ccd653d7221a5310fb3ef498b4"} Dec 10 10:48:50 crc kubenswrapper[4682]: I1210 10:48:50.079442 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db23cbd1a41671452784960dbaa92a3c8fd674ccd653d7221a5310fb3ef498b4" Dec 10 10:48:50 crc kubenswrapper[4682]: I1210 10:48:50.079191 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 10:48:50 crc kubenswrapper[4682]: I1210 10:48:50.082272 4682 generic.go:334] "Generic (PLEG): container finished" podID="7ec82d4e-7aac-438d-ada1-ec31302939a7" containerID="d0733ec6cd51d18dd82306854cf92baf4ecf46c65de97d3ff1a89c8a0e2677a9" exitCode=0 Dec 10 10:48:50 crc kubenswrapper[4682]: I1210 10:48:50.082314 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4jlk2" event={"ID":"7ec82d4e-7aac-438d-ada1-ec31302939a7","Type":"ContainerDied","Data":"d0733ec6cd51d18dd82306854cf92baf4ecf46c65de97d3ff1a89c8a0e2677a9"} Dec 10 10:48:51 crc kubenswrapper[4682]: I1210 10:48:51.090350 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4jlk2" event={"ID":"7ec82d4e-7aac-438d-ada1-ec31302939a7","Type":"ContainerStarted","Data":"b74aca3d602de956224fdc13b9befbe10b85daa875b0b2c34a8dde450ef39dff"} Dec 10 10:48:51 crc kubenswrapper[4682]: I1210 10:48:51.108885 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4jlk2" podStartSLOduration=3.092394881 podStartE2EDuration="54.108867366s" podCreationTimestamp="2025-12-10 10:47:57 +0000 UTC" firstStartedPulling="2025-12-10 10:47:59.553278937 +0000 UTC m=+159.873489687" lastFinishedPulling="2025-12-10 10:48:50.569751422 +0000 UTC m=+210.889962172" observedRunningTime="2025-12-10 10:48:51.105725483 +0000 UTC m=+211.425936243" watchObservedRunningTime="2025-12-10 10:48:51.108867366 +0000 UTC m=+211.429078106" Dec 10 10:48:55 crc kubenswrapper[4682]: I1210 10:48:55.112922 4682 generic.go:334] "Generic (PLEG): container finished" podID="3ee34116-c378-4109-a0a2-e5ea084c98ad" containerID="859ec75704fc1e1044f01a1d231290080b1697a47cd64751bc4355c381910995" exitCode=0 Dec 10 10:48:55 crc kubenswrapper[4682]: I1210 10:48:55.112998 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lnqbd" event={"ID":"3ee34116-c378-4109-a0a2-e5ea084c98ad","Type":"ContainerDied","Data":"859ec75704fc1e1044f01a1d231290080b1697a47cd64751bc4355c381910995"} Dec 10 10:48:57 crc kubenswrapper[4682]: I1210 10:48:57.125995 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lnqbd" event={"ID":"3ee34116-c378-4109-a0a2-e5ea084c98ad","Type":"ContainerStarted","Data":"9a8678ea12f8ba4f7e351f44986fcce02eb81b668338871538acd709d7692fc0"} Dec 10 10:48:57 crc kubenswrapper[4682]: I1210 10:48:57.146853 4682 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lnqbd" podStartSLOduration=4.414153964 podStartE2EDuration="1m4.146837141s" podCreationTimestamp="2025-12-10 10:47:53 +0000 UTC" firstStartedPulling="2025-12-10 10:47:57.132562797 +0000 UTC m=+157.452773547" lastFinishedPulling="2025-12-10 10:48:56.865245974 +0000 UTC m=+217.185456724" observedRunningTime="2025-12-10 10:48:57.144194954 +0000 UTC m=+217.464405694" watchObservedRunningTime="2025-12-10 10:48:57.146837141 +0000 UTC m=+217.467047891" Dec 10 10:48:57 crc kubenswrapper[4682]: I1210 10:48:57.425450 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:48:57 crc kubenswrapper[4682]: I1210 10:48:57.425518 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:48:57 crc kubenswrapper[4682]: I1210 10:48:57.516398 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:48:58 crc kubenswrapper[4682]: I1210 10:48:58.174540 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:49:00 crc kubenswrapper[4682]: I1210 10:49:00.143216 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-skvvg" event={"ID":"81310b9c-2d81-4693-afa2-14bfa74e3bc9","Type":"ContainerStarted","Data":"1464b12314dec5bfc98e1d99455db368b9299ae1fed5212697f71c6b23ac5c22"} Dec 10 10:49:00 crc kubenswrapper[4682]: I1210 10:49:00.145742 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jxzps" event={"ID":"2e7d49cd-f122-470b-b278-874045f4f089","Type":"ContainerStarted","Data":"309f7bedbb6469220a56db97adbbfa7ee28e5cc8333c3018bf3924a1cfc6d39f"} Dec 10 10:49:01 crc kubenswrapper[4682]: I1210 10:49:01.153661 4682 generic.go:334] "Generic (PLEG): container finished" podID="2e7d49cd-f122-470b-b278-874045f4f089" containerID="309f7bedbb6469220a56db97adbbfa7ee28e5cc8333c3018bf3924a1cfc6d39f" exitCode=0 Dec 10 10:49:01 crc kubenswrapper[4682]: I1210 10:49:01.153724 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jxzps" event={"ID":"2e7d49cd-f122-470b-b278-874045f4f089","Type":"ContainerDied","Data":"309f7bedbb6469220a56db97adbbfa7ee28e5cc8333c3018bf3924a1cfc6d39f"} Dec 10 10:49:01 crc kubenswrapper[4682]: I1210 10:49:01.158821 4682 generic.go:334] "Generic (PLEG): container finished" podID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" containerID="1464b12314dec5bfc98e1d99455db368b9299ae1fed5212697f71c6b23ac5c22" exitCode=0 Dec 10 10:49:01 crc kubenswrapper[4682]: I1210 10:49:01.158856 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-skvvg" event={"ID":"81310b9c-2d81-4693-afa2-14bfa74e3bc9","Type":"ContainerDied","Data":"1464b12314dec5bfc98e1d99455db368b9299ae1fed5212697f71c6b23ac5c22"} Dec 10 10:49:03 crc kubenswrapper[4682]: I1210 10:49:03.170209 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gp62l" event={"ID":"06e73e24-a522-4e08-98e0-5199a83b016f","Type":"ContainerStarted","Data":"71a8e4dc29d3f2177022b979577dd11ccadad7720ce226a24c68c32ff777a446"} Dec 10 10:49:04 crc kubenswrapper[4682]: I1210 10:49:04.188769 4682 generic.go:334] "Generic (PLEG): container finished" 
podID="06e73e24-a522-4e08-98e0-5199a83b016f" containerID="71a8e4dc29d3f2177022b979577dd11ccadad7720ce226a24c68c32ff777a446" exitCode=0 Dec 10 10:49:04 crc kubenswrapper[4682]: I1210 10:49:04.188834 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gp62l" event={"ID":"06e73e24-a522-4e08-98e0-5199a83b016f","Type":"ContainerDied","Data":"71a8e4dc29d3f2177022b979577dd11ccadad7720ce226a24c68c32ff777a446"} Dec 10 10:49:04 crc kubenswrapper[4682]: I1210 10:49:04.192104 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vv9wv" event={"ID":"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9","Type":"ContainerStarted","Data":"17da801db36eb922160f451c2af9c5c63412b8f467ee20025230503d3f46d360"} Dec 10 10:49:04 crc kubenswrapper[4682]: I1210 10:49:04.196709 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-skvvg" event={"ID":"81310b9c-2d81-4693-afa2-14bfa74e3bc9","Type":"ContainerStarted","Data":"472d296f0d8c829c4485710b621c74cb3ea5355a3e7249d999b2617bfd59f95c"} Dec 10 10:49:04 crc kubenswrapper[4682]: I1210 10:49:04.202523 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jxzps" event={"ID":"2e7d49cd-f122-470b-b278-874045f4f089","Type":"ContainerStarted","Data":"f4d1cd2dead5ee669eb7cd611c711cc0d08ea5d1470ded403dc6f90faeeeeba8"} Dec 10 10:49:04 crc kubenswrapper[4682]: I1210 10:49:04.257996 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-skvvg" podStartSLOduration=4.364274669 podStartE2EDuration="1m10.257977358s" podCreationTimestamp="2025-12-10 10:47:54 +0000 UTC" firstStartedPulling="2025-12-10 10:47:57.381205509 +0000 UTC m=+157.701416259" lastFinishedPulling="2025-12-10 10:49:03.274908198 +0000 UTC m=+223.595118948" observedRunningTime="2025-12-10 10:49:04.253974427 +0000 UTC m=+224.574185207" watchObservedRunningTime="2025-12-10 10:49:04.257977358 +0000 UTC m=+224.578188108" Dec 10 10:49:04 crc kubenswrapper[4682]: I1210 10:49:04.276070 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jxzps" podStartSLOduration=4.781136013 podStartE2EDuration="1m7.276053631s" podCreationTimestamp="2025-12-10 10:47:57 +0000 UTC" firstStartedPulling="2025-12-10 10:48:00.615710547 +0000 UTC m=+160.935921297" lastFinishedPulling="2025-12-10 10:49:03.110628165 +0000 UTC m=+223.430838915" observedRunningTime="2025-12-10 10:49:04.271742 +0000 UTC m=+224.591952770" watchObservedRunningTime="2025-12-10 10:49:04.276053631 +0000 UTC m=+224.596264371" Dec 10 10:49:04 crc kubenswrapper[4682]: I1210 10:49:04.957135 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:49:04 crc kubenswrapper[4682]: I1210 10:49:04.957184 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:49:05 crc kubenswrapper[4682]: I1210 10:49:05.022440 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:49:05 crc kubenswrapper[4682]: I1210 10:49:05.207074 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:49:05 crc kubenswrapper[4682]: I1210 10:49:05.207123 4682 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:49:05 crc kubenswrapper[4682]: I1210 10:49:05.217267 4682 generic.go:334] "Generic (PLEG): container finished" podID="97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" containerID="17da801db36eb922160f451c2af9c5c63412b8f467ee20025230503d3f46d360" exitCode=0 Dec 10 10:49:05 crc kubenswrapper[4682]: I1210 10:49:05.217366 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vv9wv" event={"ID":"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9","Type":"ContainerDied","Data":"17da801db36eb922160f451c2af9c5c63412b8f467ee20025230503d3f46d360"} Dec 10 10:49:05 crc kubenswrapper[4682]: I1210 10:49:05.275082 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:49:06 crc kubenswrapper[4682]: I1210 10:49:06.255821 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-skvvg" podUID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" containerName="registry-server" probeResult="failure" output=< Dec 10 10:49:06 crc kubenswrapper[4682]: timeout: failed to connect service ":50051" within 1s Dec 10 10:49:06 crc kubenswrapper[4682]: > Dec 10 10:49:06 crc kubenswrapper[4682]: I1210 10:49:06.479172 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:49:06 crc kubenswrapper[4682]: I1210 10:49:06.479242 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:49:06 crc kubenswrapper[4682]: I1210 10:49:06.479285 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:49:06 crc kubenswrapper[4682]: I1210 10:49:06.479874 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 10:49:06 crc kubenswrapper[4682]: I1210 10:49:06.479991 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26" gracePeriod=600 Dec 10 10:49:07 crc kubenswrapper[4682]: I1210 10:49:07.228888 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j6tv8" event={"ID":"87412cec-b4af-4f63-a127-4ba4214d57b8","Type":"ContainerStarted","Data":"b354cc0581f8c71948c7ab21185285b0138f320450880a189c24ff674f1911c5"} Dec 10 10:49:07 crc kubenswrapper[4682]: I1210 10:49:07.231569 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rjvnf" 
event={"ID":"abde8dd6-2027-45fa-9052-e619c5cadecf","Type":"ContainerStarted","Data":"0beedaf7086d3cda843e52d4f442d824ff11ed192a5eb0cffa72baa44c6e6138"} Dec 10 10:49:07 crc kubenswrapper[4682]: I1210 10:49:07.234755 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gp62l" event={"ID":"06e73e24-a522-4e08-98e0-5199a83b016f","Type":"ContainerStarted","Data":"c0a5175fd229cf8c9a76d2d8ab7f653c4312747059eadbc9fc1dcf879602ede2"} Dec 10 10:49:07 crc kubenswrapper[4682]: I1210 10:49:07.248271 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26" exitCode=0 Dec 10 10:49:07 crc kubenswrapper[4682]: I1210 10:49:07.248352 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26"} Dec 10 10:49:07 crc kubenswrapper[4682]: I1210 10:49:07.248402 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"6e38945e715c1abac31bb88fcaf30353ca7e19cc11c8812056c7fad1c9342ed1"} Dec 10 10:49:07 crc kubenswrapper[4682]: I1210 10:49:07.321123 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gp62l" podStartSLOduration=4.8238224899999995 podStartE2EDuration="1m12.321105062s" podCreationTimestamp="2025-12-10 10:47:55 +0000 UTC" firstStartedPulling="2025-12-10 10:47:59.565697843 +0000 UTC m=+159.885908593" lastFinishedPulling="2025-12-10 10:49:07.062980415 +0000 UTC m=+227.383191165" observedRunningTime="2025-12-10 10:49:07.319280362 +0000 UTC m=+227.639491132" watchObservedRunningTime="2025-12-10 10:49:07.321105062 +0000 UTC m=+227.641315812" Dec 10 10:49:08 crc kubenswrapper[4682]: I1210 10:49:08.210122 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:49:08 crc kubenswrapper[4682]: I1210 10:49:08.210542 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:49:08 crc kubenswrapper[4682]: I1210 10:49:08.255978 4682 generic.go:334] "Generic (PLEG): container finished" podID="87412cec-b4af-4f63-a127-4ba4214d57b8" containerID="b354cc0581f8c71948c7ab21185285b0138f320450880a189c24ff674f1911c5" exitCode=0 Dec 10 10:49:08 crc kubenswrapper[4682]: I1210 10:49:08.256070 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j6tv8" event={"ID":"87412cec-b4af-4f63-a127-4ba4214d57b8","Type":"ContainerDied","Data":"b354cc0581f8c71948c7ab21185285b0138f320450880a189c24ff674f1911c5"} Dec 10 10:49:08 crc kubenswrapper[4682]: I1210 10:49:08.259305 4682 generic.go:334] "Generic (PLEG): container finished" podID="abde8dd6-2027-45fa-9052-e619c5cadecf" containerID="0beedaf7086d3cda843e52d4f442d824ff11ed192a5eb0cffa72baa44c6e6138" exitCode=0 Dec 10 10:49:08 crc kubenswrapper[4682]: I1210 10:49:08.259390 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rjvnf" 
event={"ID":"abde8dd6-2027-45fa-9052-e619c5cadecf","Type":"ContainerDied","Data":"0beedaf7086d3cda843e52d4f442d824ff11ed192a5eb0cffa72baa44c6e6138"} Dec 10 10:49:08 crc kubenswrapper[4682]: I1210 10:49:08.264592 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vv9wv" event={"ID":"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9","Type":"ContainerStarted","Data":"745d148c50237dd99344726fc3c0fb1828fbf09f410417cd22f67d34c3459947"} Dec 10 10:49:08 crc kubenswrapper[4682]: I1210 10:49:08.305629 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vv9wv" podStartSLOduration=4.233200484 podStartE2EDuration="1m12.30561137s" podCreationTimestamp="2025-12-10 10:47:56 +0000 UTC" firstStartedPulling="2025-12-10 10:47:59.565584819 +0000 UTC m=+159.885795569" lastFinishedPulling="2025-12-10 10:49:07.637995705 +0000 UTC m=+227.958206455" observedRunningTime="2025-12-10 10:49:08.304233054 +0000 UTC m=+228.624443824" watchObservedRunningTime="2025-12-10 10:49:08.30561137 +0000 UTC m=+228.625822120" Dec 10 10:49:09 crc kubenswrapper[4682]: I1210 10:49:09.253209 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jxzps" podUID="2e7d49cd-f122-470b-b278-874045f4f089" containerName="registry-server" probeResult="failure" output=< Dec 10 10:49:09 crc kubenswrapper[4682]: timeout: failed to connect service ":50051" within 1s Dec 10 10:49:09 crc kubenswrapper[4682]: > Dec 10 10:49:09 crc kubenswrapper[4682]: I1210 10:49:09.273264 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rjvnf" event={"ID":"abde8dd6-2027-45fa-9052-e619c5cadecf","Type":"ContainerStarted","Data":"052409b335b893a8e3ecb29ddc6cedcf583be67501753ddd9ecd1329aee3dace"} Dec 10 10:49:09 crc kubenswrapper[4682]: I1210 10:49:09.288442 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rjvnf" podStartSLOduration=4.989993209 podStartE2EDuration="1m15.288423872s" podCreationTimestamp="2025-12-10 10:47:54 +0000 UTC" firstStartedPulling="2025-12-10 10:47:58.507922984 +0000 UTC m=+158.828133734" lastFinishedPulling="2025-12-10 10:49:08.806353647 +0000 UTC m=+229.126564397" observedRunningTime="2025-12-10 10:49:09.287689328 +0000 UTC m=+229.607900098" watchObservedRunningTime="2025-12-10 10:49:09.288423872 +0000 UTC m=+229.608634622" Dec 10 10:49:10 crc kubenswrapper[4682]: I1210 10:49:10.282421 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j6tv8" event={"ID":"87412cec-b4af-4f63-a127-4ba4214d57b8","Type":"ContainerStarted","Data":"8f0525a2dd2ae201c7299b3546f256fee3ac4b8e1aa4c4b30b16fb3e06052160"} Dec 10 10:49:10 crc kubenswrapper[4682]: I1210 10:49:10.311608 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j6tv8" podStartSLOduration=4.138024536 podStartE2EDuration="1m16.311592876s" podCreationTimestamp="2025-12-10 10:47:54 +0000 UTC" firstStartedPulling="2025-12-10 10:47:57.391189126 +0000 UTC m=+157.711399876" lastFinishedPulling="2025-12-10 10:49:09.564757466 +0000 UTC m=+229.884968216" observedRunningTime="2025-12-10 10:49:10.309221668 +0000 UTC m=+230.629432418" watchObservedRunningTime="2025-12-10 10:49:10.311592876 +0000 UTC m=+230.631803626" Dec 10 10:49:14 crc kubenswrapper[4682]: I1210 10:49:14.962459 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:49:14 crc kubenswrapper[4682]: I1210 10:49:14.963004 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:49:15 crc kubenswrapper[4682]: I1210 10:49:15.004624 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:49:15 crc kubenswrapper[4682]: I1210 10:49:15.243929 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:49:15 crc kubenswrapper[4682]: I1210 10:49:15.244107 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:49:15 crc kubenswrapper[4682]: I1210 10:49:15.265494 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:49:15 crc kubenswrapper[4682]: I1210 10:49:15.303194 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:49:15 crc kubenswrapper[4682]: I1210 10:49:15.317310 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:49:15 crc kubenswrapper[4682]: I1210 10:49:15.351768 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:49:15 crc kubenswrapper[4682]: I1210 10:49:15.352537 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:49:16 crc kubenswrapper[4682]: I1210 10:49:16.310230 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:49:16 crc kubenswrapper[4682]: I1210 10:49:16.310274 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:49:16 crc kubenswrapper[4682]: I1210 10:49:16.351365 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:49:16 crc kubenswrapper[4682]: I1210 10:49:16.676726 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:49:16 crc kubenswrapper[4682]: I1210 10:49:16.676794 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:49:16 crc kubenswrapper[4682]: I1210 10:49:16.717539 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:49:17 crc kubenswrapper[4682]: I1210 10:49:17.297946 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rjvnf"] Dec 10 10:49:17 crc kubenswrapper[4682]: I1210 10:49:17.362721 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:49:17 crc kubenswrapper[4682]: I1210 10:49:17.376838 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:49:17 crc kubenswrapper[4682]: I1210 10:49:17.503933 4682 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-skvvg"] Dec 10 10:49:17 crc kubenswrapper[4682]: I1210 10:49:17.504232 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-skvvg" podUID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" containerName="registry-server" containerID="cri-o://472d296f0d8c829c4485710b621c74cb3ea5355a3e7249d999b2617bfd59f95c" gracePeriod=2 Dec 10 10:49:17 crc kubenswrapper[4682]: I1210 10:49:17.884347 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.081221 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81310b9c-2d81-4693-afa2-14bfa74e3bc9-catalog-content\") pod \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\" (UID: \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\") " Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.081632 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81310b9c-2d81-4693-afa2-14bfa74e3bc9-utilities\") pod \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\" (UID: \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\") " Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.081685 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hb5j\" (UniqueName: \"kubernetes.io/projected/81310b9c-2d81-4693-afa2-14bfa74e3bc9-kube-api-access-6hb5j\") pod \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\" (UID: \"81310b9c-2d81-4693-afa2-14bfa74e3bc9\") " Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.082440 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81310b9c-2d81-4693-afa2-14bfa74e3bc9-utilities" (OuterVolumeSpecName: "utilities") pod "81310b9c-2d81-4693-afa2-14bfa74e3bc9" (UID: "81310b9c-2d81-4693-afa2-14bfa74e3bc9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.087656 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81310b9c-2d81-4693-afa2-14bfa74e3bc9-kube-api-access-6hb5j" (OuterVolumeSpecName: "kube-api-access-6hb5j") pod "81310b9c-2d81-4693-afa2-14bfa74e3bc9" (UID: "81310b9c-2d81-4693-afa2-14bfa74e3bc9"). InnerVolumeSpecName "kube-api-access-6hb5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.137351 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81310b9c-2d81-4693-afa2-14bfa74e3bc9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "81310b9c-2d81-4693-afa2-14bfa74e3bc9" (UID: "81310b9c-2d81-4693-afa2-14bfa74e3bc9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.182643 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81310b9c-2d81-4693-afa2-14bfa74e3bc9-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.182694 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81310b9c-2d81-4693-afa2-14bfa74e3bc9-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.182703 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hb5j\" (UniqueName: \"kubernetes.io/projected/81310b9c-2d81-4693-afa2-14bfa74e3bc9-kube-api-access-6hb5j\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.251161 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.287991 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.326888 4682 generic.go:334] "Generic (PLEG): container finished" podID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" containerID="472d296f0d8c829c4485710b621c74cb3ea5355a3e7249d999b2617bfd59f95c" exitCode=0 Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.326949 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-skvvg" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.327026 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-skvvg" event={"ID":"81310b9c-2d81-4693-afa2-14bfa74e3bc9","Type":"ContainerDied","Data":"472d296f0d8c829c4485710b621c74cb3ea5355a3e7249d999b2617bfd59f95c"} Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.327060 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-skvvg" event={"ID":"81310b9c-2d81-4693-afa2-14bfa74e3bc9","Type":"ContainerDied","Data":"eed0cb95d7df5d931e0110e6fb836aeefb352b809dde0fda16bdceb16a2a0dba"} Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.327077 4682 scope.go:117] "RemoveContainer" containerID="472d296f0d8c829c4485710b621c74cb3ea5355a3e7249d999b2617bfd59f95c" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.327811 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rjvnf" podUID="abde8dd6-2027-45fa-9052-e619c5cadecf" containerName="registry-server" containerID="cri-o://052409b335b893a8e3ecb29ddc6cedcf583be67501753ddd9ecd1329aee3dace" gracePeriod=2 Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.342360 4682 scope.go:117] "RemoveContainer" containerID="1464b12314dec5bfc98e1d99455db368b9299ae1fed5212697f71c6b23ac5c22" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.358251 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-skvvg"] Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.361997 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-skvvg"] Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.387111 4682 scope.go:117] "RemoveContainer" 
containerID="4f6d682e6169a195163a355ae1c94c7bc6c54e70a883d20fb0d55f28cf3946d6" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.387388 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" path="/var/lib/kubelet/pods/81310b9c-2d81-4693-afa2-14bfa74e3bc9/volumes" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.460712 4682 scope.go:117] "RemoveContainer" containerID="472d296f0d8c829c4485710b621c74cb3ea5355a3e7249d999b2617bfd59f95c" Dec 10 10:49:18 crc kubenswrapper[4682]: E1210 10:49:18.461281 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"472d296f0d8c829c4485710b621c74cb3ea5355a3e7249d999b2617bfd59f95c\": container with ID starting with 472d296f0d8c829c4485710b621c74cb3ea5355a3e7249d999b2617bfd59f95c not found: ID does not exist" containerID="472d296f0d8c829c4485710b621c74cb3ea5355a3e7249d999b2617bfd59f95c" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.461345 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"472d296f0d8c829c4485710b621c74cb3ea5355a3e7249d999b2617bfd59f95c"} err="failed to get container status \"472d296f0d8c829c4485710b621c74cb3ea5355a3e7249d999b2617bfd59f95c\": rpc error: code = NotFound desc = could not find container \"472d296f0d8c829c4485710b621c74cb3ea5355a3e7249d999b2617bfd59f95c\": container with ID starting with 472d296f0d8c829c4485710b621c74cb3ea5355a3e7249d999b2617bfd59f95c not found: ID does not exist" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.461378 4682 scope.go:117] "RemoveContainer" containerID="1464b12314dec5bfc98e1d99455db368b9299ae1fed5212697f71c6b23ac5c22" Dec 10 10:49:18 crc kubenswrapper[4682]: E1210 10:49:18.461632 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1464b12314dec5bfc98e1d99455db368b9299ae1fed5212697f71c6b23ac5c22\": container with ID starting with 1464b12314dec5bfc98e1d99455db368b9299ae1fed5212697f71c6b23ac5c22 not found: ID does not exist" containerID="1464b12314dec5bfc98e1d99455db368b9299ae1fed5212697f71c6b23ac5c22" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.461657 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1464b12314dec5bfc98e1d99455db368b9299ae1fed5212697f71c6b23ac5c22"} err="failed to get container status \"1464b12314dec5bfc98e1d99455db368b9299ae1fed5212697f71c6b23ac5c22\": rpc error: code = NotFound desc = could not find container \"1464b12314dec5bfc98e1d99455db368b9299ae1fed5212697f71c6b23ac5c22\": container with ID starting with 1464b12314dec5bfc98e1d99455db368b9299ae1fed5212697f71c6b23ac5c22 not found: ID does not exist" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.461673 4682 scope.go:117] "RemoveContainer" containerID="4f6d682e6169a195163a355ae1c94c7bc6c54e70a883d20fb0d55f28cf3946d6" Dec 10 10:49:18 crc kubenswrapper[4682]: E1210 10:49:18.462109 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f6d682e6169a195163a355ae1c94c7bc6c54e70a883d20fb0d55f28cf3946d6\": container with ID starting with 4f6d682e6169a195163a355ae1c94c7bc6c54e70a883d20fb0d55f28cf3946d6 not found: ID does not exist" containerID="4f6d682e6169a195163a355ae1c94c7bc6c54e70a883d20fb0d55f28cf3946d6" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.462193 4682 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f6d682e6169a195163a355ae1c94c7bc6c54e70a883d20fb0d55f28cf3946d6"} err="failed to get container status \"4f6d682e6169a195163a355ae1c94c7bc6c54e70a883d20fb0d55f28cf3946d6\": rpc error: code = NotFound desc = could not find container \"4f6d682e6169a195163a355ae1c94c7bc6c54e70a883d20fb0d55f28cf3946d6\": container with ID starting with 4f6d682e6169a195163a355ae1c94c7bc6c54e70a883d20fb0d55f28cf3946d6 not found: ID does not exist" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.693043 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.889545 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abde8dd6-2027-45fa-9052-e619c5cadecf-catalog-content\") pod \"abde8dd6-2027-45fa-9052-e619c5cadecf\" (UID: \"abde8dd6-2027-45fa-9052-e619c5cadecf\") " Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.890670 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abde8dd6-2027-45fa-9052-e619c5cadecf-utilities\") pod \"abde8dd6-2027-45fa-9052-e619c5cadecf\" (UID: \"abde8dd6-2027-45fa-9052-e619c5cadecf\") " Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.890743 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m72h6\" (UniqueName: \"kubernetes.io/projected/abde8dd6-2027-45fa-9052-e619c5cadecf-kube-api-access-m72h6\") pod \"abde8dd6-2027-45fa-9052-e619c5cadecf\" (UID: \"abde8dd6-2027-45fa-9052-e619c5cadecf\") " Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.891516 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abde8dd6-2027-45fa-9052-e619c5cadecf-utilities" (OuterVolumeSpecName: "utilities") pod "abde8dd6-2027-45fa-9052-e619c5cadecf" (UID: "abde8dd6-2027-45fa-9052-e619c5cadecf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.894305 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abde8dd6-2027-45fa-9052-e619c5cadecf-kube-api-access-m72h6" (OuterVolumeSpecName: "kube-api-access-m72h6") pod "abde8dd6-2027-45fa-9052-e619c5cadecf" (UID: "abde8dd6-2027-45fa-9052-e619c5cadecf"). InnerVolumeSpecName "kube-api-access-m72h6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.941218 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abde8dd6-2027-45fa-9052-e619c5cadecf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "abde8dd6-2027-45fa-9052-e619c5cadecf" (UID: "abde8dd6-2027-45fa-9052-e619c5cadecf"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.992557 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abde8dd6-2027-45fa-9052-e619c5cadecf-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.992599 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m72h6\" (UniqueName: \"kubernetes.io/projected/abde8dd6-2027-45fa-9052-e619c5cadecf-kube-api-access-m72h6\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:18 crc kubenswrapper[4682]: I1210 10:49:18.992636 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abde8dd6-2027-45fa-9052-e619c5cadecf-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.335595 4682 generic.go:334] "Generic (PLEG): container finished" podID="abde8dd6-2027-45fa-9052-e619c5cadecf" containerID="052409b335b893a8e3ecb29ddc6cedcf583be67501753ddd9ecd1329aee3dace" exitCode=0 Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.335638 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rjvnf" event={"ID":"abde8dd6-2027-45fa-9052-e619c5cadecf","Type":"ContainerDied","Data":"052409b335b893a8e3ecb29ddc6cedcf583be67501753ddd9ecd1329aee3dace"} Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.335668 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rjvnf" Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.335683 4682 scope.go:117] "RemoveContainer" containerID="052409b335b893a8e3ecb29ddc6cedcf583be67501753ddd9ecd1329aee3dace" Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.335670 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rjvnf" event={"ID":"abde8dd6-2027-45fa-9052-e619c5cadecf","Type":"ContainerDied","Data":"1175bfe622751cdbd3f521a40b5beae305653b71136601e4cfd06adcb7464181"} Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.352090 4682 scope.go:117] "RemoveContainer" containerID="0beedaf7086d3cda843e52d4f442d824ff11ed192a5eb0cffa72baa44c6e6138" Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.363490 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rjvnf"] Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.367599 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rjvnf"] Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.389860 4682 scope.go:117] "RemoveContainer" containerID="3c7e35e482bd85d255e4030a98497847d21ed6cd3f34ed859abeb8d0c1430da4" Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.407826 4682 scope.go:117] "RemoveContainer" containerID="052409b335b893a8e3ecb29ddc6cedcf583be67501753ddd9ecd1329aee3dace" Dec 10 10:49:19 crc kubenswrapper[4682]: E1210 10:49:19.408272 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"052409b335b893a8e3ecb29ddc6cedcf583be67501753ddd9ecd1329aee3dace\": container with ID starting with 052409b335b893a8e3ecb29ddc6cedcf583be67501753ddd9ecd1329aee3dace not found: ID does not exist" containerID="052409b335b893a8e3ecb29ddc6cedcf583be67501753ddd9ecd1329aee3dace" Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.408316 
4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"052409b335b893a8e3ecb29ddc6cedcf583be67501753ddd9ecd1329aee3dace"} err="failed to get container status \"052409b335b893a8e3ecb29ddc6cedcf583be67501753ddd9ecd1329aee3dace\": rpc error: code = NotFound desc = could not find container \"052409b335b893a8e3ecb29ddc6cedcf583be67501753ddd9ecd1329aee3dace\": container with ID starting with 052409b335b893a8e3ecb29ddc6cedcf583be67501753ddd9ecd1329aee3dace not found: ID does not exist" Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.408536 4682 scope.go:117] "RemoveContainer" containerID="0beedaf7086d3cda843e52d4f442d824ff11ed192a5eb0cffa72baa44c6e6138" Dec 10 10:49:19 crc kubenswrapper[4682]: E1210 10:49:19.408943 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0beedaf7086d3cda843e52d4f442d824ff11ed192a5eb0cffa72baa44c6e6138\": container with ID starting with 0beedaf7086d3cda843e52d4f442d824ff11ed192a5eb0cffa72baa44c6e6138 not found: ID does not exist" containerID="0beedaf7086d3cda843e52d4f442d824ff11ed192a5eb0cffa72baa44c6e6138" Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.408979 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0beedaf7086d3cda843e52d4f442d824ff11ed192a5eb0cffa72baa44c6e6138"} err="failed to get container status \"0beedaf7086d3cda843e52d4f442d824ff11ed192a5eb0cffa72baa44c6e6138\": rpc error: code = NotFound desc = could not find container \"0beedaf7086d3cda843e52d4f442d824ff11ed192a5eb0cffa72baa44c6e6138\": container with ID starting with 0beedaf7086d3cda843e52d4f442d824ff11ed192a5eb0cffa72baa44c6e6138 not found: ID does not exist" Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.408997 4682 scope.go:117] "RemoveContainer" containerID="3c7e35e482bd85d255e4030a98497847d21ed6cd3f34ed859abeb8d0c1430da4" Dec 10 10:49:19 crc kubenswrapper[4682]: E1210 10:49:19.409250 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c7e35e482bd85d255e4030a98497847d21ed6cd3f34ed859abeb8d0c1430da4\": container with ID starting with 3c7e35e482bd85d255e4030a98497847d21ed6cd3f34ed859abeb8d0c1430da4 not found: ID does not exist" containerID="3c7e35e482bd85d255e4030a98497847d21ed6cd3f34ed859abeb8d0c1430da4" Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.409280 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c7e35e482bd85d255e4030a98497847d21ed6cd3f34ed859abeb8d0c1430da4"} err="failed to get container status \"3c7e35e482bd85d255e4030a98497847d21ed6cd3f34ed859abeb8d0c1430da4\": rpc error: code = NotFound desc = could not find container \"3c7e35e482bd85d255e4030a98497847d21ed6cd3f34ed859abeb8d0c1430da4\": container with ID starting with 3c7e35e482bd85d255e4030a98497847d21ed6cd3f34ed859abeb8d0c1430da4 not found: ID does not exist" Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.704934 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vv9wv"] Dec 10 10:49:19 crc kubenswrapper[4682]: I1210 10:49:19.705571 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vv9wv" podUID="97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" containerName="registry-server" containerID="cri-o://745d148c50237dd99344726fc3c0fb1828fbf09f410417cd22f67d34c3459947" gracePeriod=2 Dec 10 
10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.036561 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.208309 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-catalog-content\") pod \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\" (UID: \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\") " Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.208608 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-utilities\") pod \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\" (UID: \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\") " Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.208713 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9l2hq\" (UniqueName: \"kubernetes.io/projected/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-kube-api-access-9l2hq\") pod \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\" (UID: \"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9\") " Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.209855 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-utilities" (OuterVolumeSpecName: "utilities") pod "97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" (UID: "97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.212403 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-kube-api-access-9l2hq" (OuterVolumeSpecName: "kube-api-access-9l2hq") pod "97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" (UID: "97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9"). InnerVolumeSpecName "kube-api-access-9l2hq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.227139 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" (UID: "97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.310320 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9l2hq\" (UniqueName: \"kubernetes.io/projected/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-kube-api-access-9l2hq\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.310362 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.310384 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.346398 4682 generic.go:334] "Generic (PLEG): container finished" podID="97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" containerID="745d148c50237dd99344726fc3c0fb1828fbf09f410417cd22f67d34c3459947" exitCode=0 Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.346502 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vv9wv" event={"ID":"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9","Type":"ContainerDied","Data":"745d148c50237dd99344726fc3c0fb1828fbf09f410417cd22f67d34c3459947"} Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.346509 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vv9wv" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.346540 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vv9wv" event={"ID":"97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9","Type":"ContainerDied","Data":"b8c011a65900b8556f0c217029c714c88d087ea2cf07558cab45feb99d2f5755"} Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.346562 4682 scope.go:117] "RemoveContainer" containerID="745d148c50237dd99344726fc3c0fb1828fbf09f410417cd22f67d34c3459947" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.368288 4682 scope.go:117] "RemoveContainer" containerID="17da801db36eb922160f451c2af9c5c63412b8f467ee20025230503d3f46d360" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.393132 4682 scope.go:117] "RemoveContainer" containerID="067b8ed120ae34aef363cb877410067af87e5219073f7fc1fa0ae206bc7dd430" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.393891 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="abde8dd6-2027-45fa-9052-e619c5cadecf" path="/var/lib/kubelet/pods/abde8dd6-2027-45fa-9052-e619c5cadecf/volumes" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.394808 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vv9wv"] Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.394849 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vv9wv"] Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.406315 4682 scope.go:117] "RemoveContainer" containerID="745d148c50237dd99344726fc3c0fb1828fbf09f410417cd22f67d34c3459947" Dec 10 10:49:20 crc kubenswrapper[4682]: E1210 10:49:20.406842 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"745d148c50237dd99344726fc3c0fb1828fbf09f410417cd22f67d34c3459947\": container with ID 
starting with 745d148c50237dd99344726fc3c0fb1828fbf09f410417cd22f67d34c3459947 not found: ID does not exist" containerID="745d148c50237dd99344726fc3c0fb1828fbf09f410417cd22f67d34c3459947" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.406901 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"745d148c50237dd99344726fc3c0fb1828fbf09f410417cd22f67d34c3459947"} err="failed to get container status \"745d148c50237dd99344726fc3c0fb1828fbf09f410417cd22f67d34c3459947\": rpc error: code = NotFound desc = could not find container \"745d148c50237dd99344726fc3c0fb1828fbf09f410417cd22f67d34c3459947\": container with ID starting with 745d148c50237dd99344726fc3c0fb1828fbf09f410417cd22f67d34c3459947 not found: ID does not exist" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.406938 4682 scope.go:117] "RemoveContainer" containerID="17da801db36eb922160f451c2af9c5c63412b8f467ee20025230503d3f46d360" Dec 10 10:49:20 crc kubenswrapper[4682]: E1210 10:49:20.407426 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17da801db36eb922160f451c2af9c5c63412b8f467ee20025230503d3f46d360\": container with ID starting with 17da801db36eb922160f451c2af9c5c63412b8f467ee20025230503d3f46d360 not found: ID does not exist" containerID="17da801db36eb922160f451c2af9c5c63412b8f467ee20025230503d3f46d360" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.407451 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17da801db36eb922160f451c2af9c5c63412b8f467ee20025230503d3f46d360"} err="failed to get container status \"17da801db36eb922160f451c2af9c5c63412b8f467ee20025230503d3f46d360\": rpc error: code = NotFound desc = could not find container \"17da801db36eb922160f451c2af9c5c63412b8f467ee20025230503d3f46d360\": container with ID starting with 17da801db36eb922160f451c2af9c5c63412b8f467ee20025230503d3f46d360 not found: ID does not exist" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.407489 4682 scope.go:117] "RemoveContainer" containerID="067b8ed120ae34aef363cb877410067af87e5219073f7fc1fa0ae206bc7dd430" Dec 10 10:49:20 crc kubenswrapper[4682]: E1210 10:49:20.407798 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"067b8ed120ae34aef363cb877410067af87e5219073f7fc1fa0ae206bc7dd430\": container with ID starting with 067b8ed120ae34aef363cb877410067af87e5219073f7fc1fa0ae206bc7dd430 not found: ID does not exist" containerID="067b8ed120ae34aef363cb877410067af87e5219073f7fc1fa0ae206bc7dd430" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.407848 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"067b8ed120ae34aef363cb877410067af87e5219073f7fc1fa0ae206bc7dd430"} err="failed to get container status \"067b8ed120ae34aef363cb877410067af87e5219073f7fc1fa0ae206bc7dd430\": rpc error: code = NotFound desc = could not find container \"067b8ed120ae34aef363cb877410067af87e5219073f7fc1fa0ae206bc7dd430\": container with ID starting with 067b8ed120ae34aef363cb877410067af87e5219073f7fc1fa0ae206bc7dd430 not found: ID does not exist" Dec 10 10:49:20 crc kubenswrapper[4682]: I1210 10:49:20.744150 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4zh9p"] Dec 10 10:49:22 crc kubenswrapper[4682]: I1210 10:49:22.095562 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-operators-jxzps"] Dec 10 10:49:22 crc kubenswrapper[4682]: I1210 10:49:22.095787 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jxzps" podUID="2e7d49cd-f122-470b-b278-874045f4f089" containerName="registry-server" containerID="cri-o://f4d1cd2dead5ee669eb7cd611c711cc0d08ea5d1470ded403dc6f90faeeeeba8" gracePeriod=2 Dec 10 10:49:22 crc kubenswrapper[4682]: I1210 10:49:22.388536 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" path="/var/lib/kubelet/pods/97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9/volumes" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.025763 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.142856 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e7d49cd-f122-470b-b278-874045f4f089-utilities\") pod \"2e7d49cd-f122-470b-b278-874045f4f089\" (UID: \"2e7d49cd-f122-470b-b278-874045f4f089\") " Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.142986 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gq96\" (UniqueName: \"kubernetes.io/projected/2e7d49cd-f122-470b-b278-874045f4f089-kube-api-access-6gq96\") pod \"2e7d49cd-f122-470b-b278-874045f4f089\" (UID: \"2e7d49cd-f122-470b-b278-874045f4f089\") " Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.143062 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e7d49cd-f122-470b-b278-874045f4f089-catalog-content\") pod \"2e7d49cd-f122-470b-b278-874045f4f089\" (UID: \"2e7d49cd-f122-470b-b278-874045f4f089\") " Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.144355 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e7d49cd-f122-470b-b278-874045f4f089-utilities" (OuterVolumeSpecName: "utilities") pod "2e7d49cd-f122-470b-b278-874045f4f089" (UID: "2e7d49cd-f122-470b-b278-874045f4f089"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.150094 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e7d49cd-f122-470b-b278-874045f4f089-kube-api-access-6gq96" (OuterVolumeSpecName: "kube-api-access-6gq96") pod "2e7d49cd-f122-470b-b278-874045f4f089" (UID: "2e7d49cd-f122-470b-b278-874045f4f089"). InnerVolumeSpecName "kube-api-access-6gq96". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.245505 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gq96\" (UniqueName: \"kubernetes.io/projected/2e7d49cd-f122-470b-b278-874045f4f089-kube-api-access-6gq96\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.245551 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e7d49cd-f122-470b-b278-874045f4f089-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.263819 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e7d49cd-f122-470b-b278-874045f4f089-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2e7d49cd-f122-470b-b278-874045f4f089" (UID: "2e7d49cd-f122-470b-b278-874045f4f089"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.346364 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e7d49cd-f122-470b-b278-874045f4f089-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.363509 4682 generic.go:334] "Generic (PLEG): container finished" podID="2e7d49cd-f122-470b-b278-874045f4f089" containerID="f4d1cd2dead5ee669eb7cd611c711cc0d08ea5d1470ded403dc6f90faeeeeba8" exitCode=0 Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.363559 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jxzps" event={"ID":"2e7d49cd-f122-470b-b278-874045f4f089","Type":"ContainerDied","Data":"f4d1cd2dead5ee669eb7cd611c711cc0d08ea5d1470ded403dc6f90faeeeeba8"} Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.363588 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jxzps" event={"ID":"2e7d49cd-f122-470b-b278-874045f4f089","Type":"ContainerDied","Data":"c2ce5964360b39dc79fb3f93cdec99c66192ed729966ac53869427aed1956b9d"} Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.363611 4682 scope.go:117] "RemoveContainer" containerID="f4d1cd2dead5ee669eb7cd611c711cc0d08ea5d1470ded403dc6f90faeeeeba8" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.363740 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jxzps" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.388975 4682 scope.go:117] "RemoveContainer" containerID="309f7bedbb6469220a56db97adbbfa7ee28e5cc8333c3018bf3924a1cfc6d39f" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.400905 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jxzps"] Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.405410 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jxzps"] Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.420083 4682 scope.go:117] "RemoveContainer" containerID="e30834d80fb1c3fe4440c1aa46c5962fae0d27cb9f87fd1f651473fd678d8a0f" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.433041 4682 scope.go:117] "RemoveContainer" containerID="f4d1cd2dead5ee669eb7cd611c711cc0d08ea5d1470ded403dc6f90faeeeeba8" Dec 10 10:49:23 crc kubenswrapper[4682]: E1210 10:49:23.433501 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4d1cd2dead5ee669eb7cd611c711cc0d08ea5d1470ded403dc6f90faeeeeba8\": container with ID starting with f4d1cd2dead5ee669eb7cd611c711cc0d08ea5d1470ded403dc6f90faeeeeba8 not found: ID does not exist" containerID="f4d1cd2dead5ee669eb7cd611c711cc0d08ea5d1470ded403dc6f90faeeeeba8" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.433544 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4d1cd2dead5ee669eb7cd611c711cc0d08ea5d1470ded403dc6f90faeeeeba8"} err="failed to get container status \"f4d1cd2dead5ee669eb7cd611c711cc0d08ea5d1470ded403dc6f90faeeeeba8\": rpc error: code = NotFound desc = could not find container \"f4d1cd2dead5ee669eb7cd611c711cc0d08ea5d1470ded403dc6f90faeeeeba8\": container with ID starting with f4d1cd2dead5ee669eb7cd611c711cc0d08ea5d1470ded403dc6f90faeeeeba8 not found: ID does not exist" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.433574 4682 scope.go:117] "RemoveContainer" containerID="309f7bedbb6469220a56db97adbbfa7ee28e5cc8333c3018bf3924a1cfc6d39f" Dec 10 10:49:23 crc kubenswrapper[4682]: E1210 10:49:23.433956 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"309f7bedbb6469220a56db97adbbfa7ee28e5cc8333c3018bf3924a1cfc6d39f\": container with ID starting with 309f7bedbb6469220a56db97adbbfa7ee28e5cc8333c3018bf3924a1cfc6d39f not found: ID does not exist" containerID="309f7bedbb6469220a56db97adbbfa7ee28e5cc8333c3018bf3924a1cfc6d39f" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.434005 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"309f7bedbb6469220a56db97adbbfa7ee28e5cc8333c3018bf3924a1cfc6d39f"} err="failed to get container status \"309f7bedbb6469220a56db97adbbfa7ee28e5cc8333c3018bf3924a1cfc6d39f\": rpc error: code = NotFound desc = could not find container \"309f7bedbb6469220a56db97adbbfa7ee28e5cc8333c3018bf3924a1cfc6d39f\": container with ID starting with 309f7bedbb6469220a56db97adbbfa7ee28e5cc8333c3018bf3924a1cfc6d39f not found: ID does not exist" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.434042 4682 scope.go:117] "RemoveContainer" containerID="e30834d80fb1c3fe4440c1aa46c5962fae0d27cb9f87fd1f651473fd678d8a0f" Dec 10 10:49:23 crc kubenswrapper[4682]: E1210 10:49:23.434372 4682 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"e30834d80fb1c3fe4440c1aa46c5962fae0d27cb9f87fd1f651473fd678d8a0f\": container with ID starting with e30834d80fb1c3fe4440c1aa46c5962fae0d27cb9f87fd1f651473fd678d8a0f not found: ID does not exist" containerID="e30834d80fb1c3fe4440c1aa46c5962fae0d27cb9f87fd1f651473fd678d8a0f" Dec 10 10:49:23 crc kubenswrapper[4682]: I1210 10:49:23.434409 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e30834d80fb1c3fe4440c1aa46c5962fae0d27cb9f87fd1f651473fd678d8a0f"} err="failed to get container status \"e30834d80fb1c3fe4440c1aa46c5962fae0d27cb9f87fd1f651473fd678d8a0f\": rpc error: code = NotFound desc = could not find container \"e30834d80fb1c3fe4440c1aa46c5962fae0d27cb9f87fd1f651473fd678d8a0f\": container with ID starting with e30834d80fb1c3fe4440c1aa46c5962fae0d27cb9f87fd1f651473fd678d8a0f not found: ID does not exist" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.387583 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e7d49cd-f122-470b-b278-874045f4f089" path="/var/lib/kubelet/pods/2e7d49cd-f122-470b-b278-874045f4f089/volumes" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.815140 4682 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.815677 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e7d49cd-f122-470b-b278-874045f4f089" containerName="extract-utilities" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.815698 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e7d49cd-f122-470b-b278-874045f4f089" containerName="extract-utilities" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.815735 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" containerName="extract-content" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.815743 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" containerName="extract-content" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.815752 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abde8dd6-2027-45fa-9052-e619c5cadecf" containerName="extract-content" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.815758 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="abde8dd6-2027-45fa-9052-e619c5cadecf" containerName="extract-content" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.815769 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" containerName="extract-utilities" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.815777 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" containerName="extract-utilities" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.815810 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e7d49cd-f122-470b-b278-874045f4f089" containerName="extract-content" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.815818 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e7d49cd-f122-470b-b278-874045f4f089" containerName="extract-content" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.815831 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9c0a192-28a7-434f-bc5c-c4680e687a76" containerName="pruner" 
Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.815837 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9c0a192-28a7-434f-bc5c-c4680e687a76" containerName="pruner" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.815849 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" containerName="registry-server" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.815856 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" containerName="registry-server" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.815886 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" containerName="extract-utilities" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.815894 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" containerName="extract-utilities" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.815911 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abde8dd6-2027-45fa-9052-e619c5cadecf" containerName="registry-server" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.815919 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="abde8dd6-2027-45fa-9052-e619c5cadecf" containerName="registry-server" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.815932 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abde8dd6-2027-45fa-9052-e619c5cadecf" containerName="extract-utilities" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.815941 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="abde8dd6-2027-45fa-9052-e619c5cadecf" containerName="extract-utilities" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.815974 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" containerName="extract-content" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.815982 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" containerName="extract-content" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.815992 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e7d49cd-f122-470b-b278-874045f4f089" containerName="registry-server" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.815999 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e7d49cd-f122-470b-b278-874045f4f089" containerName="registry-server" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.816012 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" containerName="registry-server" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.816019 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" containerName="registry-server" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.816211 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e7d49cd-f122-470b-b278-874045f4f089" containerName="registry-server" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.816227 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="81310b9c-2d81-4693-afa2-14bfa74e3bc9" containerName="registry-server" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.816235 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="97d7e453-19ca-4a37-9cbf-aec8a7c0ebd9" 
containerName="registry-server" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.816244 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="abde8dd6-2027-45fa-9052-e619c5cadecf" containerName="registry-server" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.816256 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9c0a192-28a7-434f-bc5c-c4680e687a76" containerName="pruner" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.816793 4682 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.816836 4682 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.816968 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.816977 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.816986 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.816992 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.817003 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.817010 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.817016 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.817022 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.817032 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.817038 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.817046 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.817051 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.817136 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.817145 4682 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.817153 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.817161 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.817168 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.817176 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 10 10:49:24 crc kubenswrapper[4682]: E1210 10:49:24.817268 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.817275 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.817428 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.817939 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7" gracePeriod=15 Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.818046 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372" gracePeriod=15 Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.818116 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4" gracePeriod=15 Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.818130 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1" gracePeriod=15 Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.818096 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457" gracePeriod=15 Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.821441 4682 status_manager.go:861] "Pod was 
deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.862110 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.867424 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.867500 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.867525 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.867545 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.867566 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.867949 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.868278 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.868581 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: 
\"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971154 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971225 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971256 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971466 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971515 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971535 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971563 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971559 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971644 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971697 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971705 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971731 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971755 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971760 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971595 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:24 crc kubenswrapper[4682]: I1210 10:49:24.971796 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:25 crc kubenswrapper[4682]: I1210 10:49:25.158052 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:49:25 crc kubenswrapper[4682]: E1210 10:49:25.182490 4682 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.222:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187fd4fdbafd3f67 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-10 10:49:25.181497191 +0000 UTC m=+245.501707971,LastTimestamp:2025-12-10 10:49:25.181497191 +0000 UTC m=+245.501707971,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 10 10:49:25 crc kubenswrapper[4682]: I1210 10:49:25.380100 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 10 10:49:25 crc kubenswrapper[4682]: I1210 10:49:25.381650 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 10 10:49:25 crc kubenswrapper[4682]: I1210 10:49:25.382602 4682 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372" exitCode=0 Dec 10 10:49:25 crc kubenswrapper[4682]: I1210 10:49:25.382625 4682 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4" exitCode=0 Dec 10 10:49:25 crc kubenswrapper[4682]: I1210 10:49:25.382632 4682 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457" exitCode=0 Dec 10 10:49:25 crc kubenswrapper[4682]: I1210 10:49:25.382639 4682 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1" exitCode=2 Dec 10 10:49:25 crc kubenswrapper[4682]: I1210 10:49:25.382685 4682 scope.go:117] "RemoveContainer" containerID="5abd22adb8d720e9c9fc6c8bfb366da2ef12aaeb403a9e3a3fb9d310a79130fe" Dec 10 10:49:25 crc kubenswrapper[4682]: I1210 10:49:25.384263 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"fc7737de836537c412cb963691180d748c3dc25a6a724b897a7c1c04b25f99bc"} Dec 10 10:49:25 crc kubenswrapper[4682]: I1210 10:49:25.386452 4682 generic.go:334] "Generic (PLEG): container finished" podID="d666f615-9508-4824-830c-4b56aec338c0" containerID="c61ea1478c9bf6a848f45e778e7f1d79309db2817008e5e38adef419455e367a" 
exitCode=0 Dec 10 10:49:25 crc kubenswrapper[4682]: I1210 10:49:25.386519 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d666f615-9508-4824-830c-4b56aec338c0","Type":"ContainerDied","Data":"c61ea1478c9bf6a848f45e778e7f1d79309db2817008e5e38adef419455e367a"} Dec 10 10:49:25 crc kubenswrapper[4682]: I1210 10:49:25.387332 4682 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:25 crc kubenswrapper[4682]: I1210 10:49:25.387641 4682 status_manager.go:851] "Failed to get status for pod" podUID="d666f615-9508-4824-830c-4b56aec338c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.404209 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.406756 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"80094a08269d3da63553a40e52f8bfbd850fdf81b78fb4c5c43b843b2109ca42"} Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.408288 4682 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.408847 4682 status_manager.go:851] "Failed to get status for pod" podUID="d666f615-9508-4824-830c-4b56aec338c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.637927 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.638673 4682 status_manager.go:851] "Failed to get status for pod" podUID="d666f615-9508-4824-830c-4b56aec338c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.639150 4682 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.800130 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d666f615-9508-4824-830c-4b56aec338c0-kube-api-access\") pod \"d666f615-9508-4824-830c-4b56aec338c0\" (UID: \"d666f615-9508-4824-830c-4b56aec338c0\") " Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.800202 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d666f615-9508-4824-830c-4b56aec338c0-kubelet-dir\") pod \"d666f615-9508-4824-830c-4b56aec338c0\" (UID: \"d666f615-9508-4824-830c-4b56aec338c0\") " Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.800283 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d666f615-9508-4824-830c-4b56aec338c0-var-lock\") pod \"d666f615-9508-4824-830c-4b56aec338c0\" (UID: \"d666f615-9508-4824-830c-4b56aec338c0\") " Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.800527 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d666f615-9508-4824-830c-4b56aec338c0-var-lock" (OuterVolumeSpecName: "var-lock") pod "d666f615-9508-4824-830c-4b56aec338c0" (UID: "d666f615-9508-4824-830c-4b56aec338c0"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.800568 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d666f615-9508-4824-830c-4b56aec338c0-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "d666f615-9508-4824-830c-4b56aec338c0" (UID: "d666f615-9508-4824-830c-4b56aec338c0"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.806120 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d666f615-9508-4824-830c-4b56aec338c0-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d666f615-9508-4824-830c-4b56aec338c0" (UID: "d666f615-9508-4824-830c-4b56aec338c0"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.901885 4682 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d666f615-9508-4824-830c-4b56aec338c0-var-lock\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.902091 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d666f615-9508-4824-830c-4b56aec338c0-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:26 crc kubenswrapper[4682]: I1210 10:49:26.902154 4682 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d666f615-9508-4824-830c-4b56aec338c0-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.417281 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d666f615-9508-4824-830c-4b56aec338c0","Type":"ContainerDied","Data":"b2c26f77ee0f92f9003b95ba89aaf9d63ab7a0121938109f4d8766e73c33e1a8"} Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.417494 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2c26f77ee0f92f9003b95ba89aaf9d63ab7a0121938109f4d8766e73c33e1a8" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.417317 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.482162 4682 status_manager.go:851] "Failed to get status for pod" podUID="d666f615-9508-4824-830c-4b56aec338c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.482700 4682 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.734737 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.735614 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.736232 4682 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.736646 4682 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.737348 4682 status_manager.go:851] "Failed to get status for pod" podUID="d666f615-9508-4824-830c-4b56aec338c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.913410 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.913450 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.913501 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.913636 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.913695 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.913713 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.913869 4682 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.913886 4682 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:27 crc kubenswrapper[4682]: I1210 10:49:27.913898 4682 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.390293 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.425611 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.426236 4682 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7" exitCode=0 Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.426284 4682 scope.go:117] "RemoveContainer" containerID="2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.426410 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.427276 4682 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.427535 4682 status_manager.go:851] "Failed to get status for pod" podUID="d666f615-9508-4824-830c-4b56aec338c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.427731 4682 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.430736 4682 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.431102 4682 status_manager.go:851] "Failed to get status for pod" podUID="d666f615-9508-4824-830c-4b56aec338c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.431298 4682 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.450715 4682 scope.go:117] "RemoveContainer" containerID="11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.469884 4682 scope.go:117] "RemoveContainer" containerID="d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.484617 4682 scope.go:117] "RemoveContainer" containerID="7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.500732 4682 scope.go:117] "RemoveContainer" containerID="fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.523209 4682 scope.go:117] "RemoveContainer" containerID="b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.543186 4682 scope.go:117] "RemoveContainer" containerID="2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372" Dec 10 10:49:28 crc 
kubenswrapper[4682]: E1210 10:49:28.544047 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\": container with ID starting with 2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372 not found: ID does not exist" containerID="2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.544459 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372"} err="failed to get container status \"2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\": rpc error: code = NotFound desc = could not find container \"2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372\": container with ID starting with 2259b5af3a126a30279cfe55e6fbe00615f313a9df103593b39bcefae663b372 not found: ID does not exist" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.544589 4682 scope.go:117] "RemoveContainer" containerID="11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4" Dec 10 10:49:28 crc kubenswrapper[4682]: E1210 10:49:28.545540 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\": container with ID starting with 11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4 not found: ID does not exist" containerID="11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.545613 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4"} err="failed to get container status \"11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\": rpc error: code = NotFound desc = could not find container \"11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4\": container with ID starting with 11318f7c54f0ec10cc646907da86de6d540791ade24855d948c6efcbea9c5be4 not found: ID does not exist" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.545910 4682 scope.go:117] "RemoveContainer" containerID="d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457" Dec 10 10:49:28 crc kubenswrapper[4682]: E1210 10:49:28.546956 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\": container with ID starting with d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457 not found: ID does not exist" containerID="d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.547054 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457"} err="failed to get container status \"d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\": rpc error: code = NotFound desc = could not find container \"d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457\": container with ID starting with d34b495fd38f428900ee19a41fa74e79ecbaa0a4c96f15a5b3f1eea6bd080457 not found: ID does not exist" Dec 10 10:49:28 crc kubenswrapper[4682]: 
I1210 10:49:28.547161 4682 scope.go:117] "RemoveContainer" containerID="7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1" Dec 10 10:49:28 crc kubenswrapper[4682]: E1210 10:49:28.547671 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\": container with ID starting with 7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1 not found: ID does not exist" containerID="7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.547794 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1"} err="failed to get container status \"7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\": rpc error: code = NotFound desc = could not find container \"7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1\": container with ID starting with 7f502be1cbaf700acbad9c207b2c8a0f042e6c641e5757f3ccc9028ede5bfdc1 not found: ID does not exist" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.547891 4682 scope.go:117] "RemoveContainer" containerID="fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7" Dec 10 10:49:28 crc kubenswrapper[4682]: E1210 10:49:28.548268 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\": container with ID starting with fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7 not found: ID does not exist" containerID="fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.548363 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7"} err="failed to get container status \"fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\": rpc error: code = NotFound desc = could not find container \"fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7\": container with ID starting with fcac04306a80bebd394926c3566411a9d60eb50f779c97c7328a975a022c7dd7 not found: ID does not exist" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.548447 4682 scope.go:117] "RemoveContainer" containerID="b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0" Dec 10 10:49:28 crc kubenswrapper[4682]: E1210 10:49:28.548845 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\": container with ID starting with b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0 not found: ID does not exist" containerID="b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0" Dec 10 10:49:28 crc kubenswrapper[4682]: I1210 10:49:28.548945 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0"} err="failed to get container status \"b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\": rpc error: code = NotFound desc = could not find container \"b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0\": container 
with ID starting with b94983a314ac134ce69eabd4152f6be8a3142f3df1ddfdc09d1214e5019895a0 not found: ID does not exist" Dec 10 10:49:29 crc kubenswrapper[4682]: E1210 10:49:29.898156 4682 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:29 crc kubenswrapper[4682]: E1210 10:49:29.898774 4682 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:29 crc kubenswrapper[4682]: E1210 10:49:29.899121 4682 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:29 crc kubenswrapper[4682]: E1210 10:49:29.900619 4682 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:29 crc kubenswrapper[4682]: E1210 10:49:29.901147 4682 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:29 crc kubenswrapper[4682]: I1210 10:49:29.901215 4682 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Dec 10 10:49:29 crc kubenswrapper[4682]: E1210 10:49:29.901722 4682 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="200ms" Dec 10 10:49:30 crc kubenswrapper[4682]: E1210 10:49:30.102011 4682 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="400ms" Dec 10 10:49:30 crc kubenswrapper[4682]: I1210 10:49:30.388363 4682 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:30 crc kubenswrapper[4682]: I1210 10:49:30.388880 4682 status_manager.go:851] "Failed to get status for pod" podUID="d666f615-9508-4824-830c-4b56aec338c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:30 crc kubenswrapper[4682]: I1210 10:49:30.389429 4682 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:30 crc kubenswrapper[4682]: E1210 10:49:30.504039 4682 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="800ms" Dec 10 10:49:31 crc kubenswrapper[4682]: E1210 10:49:31.304728 4682 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="1.6s" Dec 10 10:49:32 crc kubenswrapper[4682]: E1210 10:49:32.906531 4682 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="3.2s" Dec 10 10:49:34 crc kubenswrapper[4682]: E1210 10:49:34.089613 4682 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.222:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187fd4fdbafd3f67 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-10 10:49:25.181497191 +0000 UTC m=+245.501707971,LastTimestamp:2025-12-10 10:49:25.181497191 +0000 UTC m=+245.501707971,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 10 10:49:36 crc kubenswrapper[4682]: E1210 10:49:36.108588 4682 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="6.4s" Dec 10 10:49:38 crc kubenswrapper[4682]: I1210 10:49:38.380396 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:38 crc kubenswrapper[4682]: I1210 10:49:38.381932 4682 status_manager.go:851] "Failed to get status for pod" podUID="d666f615-9508-4824-830c-4b56aec338c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:38 crc kubenswrapper[4682]: I1210 10:49:38.382398 4682 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:38 crc kubenswrapper[4682]: I1210 10:49:38.392864 4682 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="412547b9-dcab-487e-a6fc-bb7e3fe2b324" Dec 10 10:49:38 crc kubenswrapper[4682]: I1210 10:49:38.392904 4682 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="412547b9-dcab-487e-a6fc-bb7e3fe2b324" Dec 10 10:49:38 crc kubenswrapper[4682]: E1210 10:49:38.393313 4682 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:38 crc kubenswrapper[4682]: I1210 10:49:38.393736 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:38 crc kubenswrapper[4682]: I1210 10:49:38.487878 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"599ae07a27f5a96e221c7dbcbfb27e3cb4bb585e36527077e753b61def12ed61"} Dec 10 10:49:39 crc kubenswrapper[4682]: I1210 10:49:39.495268 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 10 10:49:39 crc kubenswrapper[4682]: I1210 10:49:39.496240 4682 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b" exitCode=1 Dec 10 10:49:39 crc kubenswrapper[4682]: I1210 10:49:39.496346 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b"} Dec 10 10:49:39 crc kubenswrapper[4682]: I1210 10:49:39.497011 4682 scope.go:117] "RemoveContainer" containerID="67cf3ecacff49dff9c3195bcd44e51bfb0fffd3e5863cbaece5bcc2e99cccb8b" Dec 10 10:49:39 crc kubenswrapper[4682]: I1210 10:49:39.497152 4682 status_manager.go:851] "Failed to get status for pod" podUID="d666f615-9508-4824-830c-4b56aec338c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 
10:49:39 crc kubenswrapper[4682]: I1210 10:49:39.497346 4682 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:39 crc kubenswrapper[4682]: I1210 10:49:39.497712 4682 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:39 crc kubenswrapper[4682]: I1210 10:49:39.498695 4682 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="2fc0d748b314f623f39931357b10c0526031cf2cb49ce5cac21b895ace0680d6" exitCode=0 Dec 10 10:49:39 crc kubenswrapper[4682]: I1210 10:49:39.498737 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"2fc0d748b314f623f39931357b10c0526031cf2cb49ce5cac21b895ace0680d6"} Dec 10 10:49:39 crc kubenswrapper[4682]: I1210 10:49:39.498981 4682 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="412547b9-dcab-487e-a6fc-bb7e3fe2b324" Dec 10 10:49:39 crc kubenswrapper[4682]: I1210 10:49:39.499005 4682 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="412547b9-dcab-487e-a6fc-bb7e3fe2b324" Dec 10 10:49:39 crc kubenswrapper[4682]: I1210 10:49:39.499232 4682 status_manager.go:851] "Failed to get status for pod" podUID="d666f615-9508-4824-830c-4b56aec338c0" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:39 crc kubenswrapper[4682]: E1210 10:49:39.499443 4682 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:39 crc kubenswrapper[4682]: I1210 10:49:39.499444 4682 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:39 crc kubenswrapper[4682]: I1210 10:49:39.499783 4682 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Dec 10 10:49:40 crc kubenswrapper[4682]: I1210 10:49:40.506857 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"cd74baeba83f7f5665c2ba6fbc82b335e00d39dd87bb59baa7a545bc08243a95"} Dec 10 10:49:40 crc kubenswrapper[4682]: I1210 10:49:40.506910 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"a6225494a4892720ab9a884a6fcc173371c5c7a9e6fc9b59af7e7e418016e09e"} Dec 10 10:49:40 crc kubenswrapper[4682]: I1210 10:49:40.506925 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2da325d60586f43387e76e033d62c5e521cb657d9c8d4b19dd822fe00444eee0"} Dec 10 10:49:40 crc kubenswrapper[4682]: I1210 10:49:40.510202 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 10 10:49:40 crc kubenswrapper[4682]: I1210 10:49:40.510246 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f6b766647d14a425399730b548dff048362886b148d8a792cecf4dbfc04a0c04"} Dec 10 10:49:41 crc kubenswrapper[4682]: I1210 10:49:41.517782 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5386abc337eae149a83c14fdd1a36a95290efbbe7454bf0d538c4e7041ede950"} Dec 10 10:49:41 crc kubenswrapper[4682]: I1210 10:49:41.518169 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"71c262c09d7a129bfa0637a587085dfb211f3b1867a1e5b32fd57a34d1400e37"} Dec 10 10:49:41 crc kubenswrapper[4682]: I1210 10:49:41.518212 4682 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="412547b9-dcab-487e-a6fc-bb7e3fe2b324" Dec 10 10:49:41 crc kubenswrapper[4682]: I1210 10:49:41.518234 4682 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="412547b9-dcab-487e-a6fc-bb7e3fe2b324" Dec 10 10:49:41 crc kubenswrapper[4682]: I1210 10:49:41.518217 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:43 crc kubenswrapper[4682]: I1210 10:49:43.394566 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:43 crc kubenswrapper[4682]: I1210 10:49:43.394942 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:43 crc kubenswrapper[4682]: I1210 10:49:43.400695 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:44 crc kubenswrapper[4682]: I1210 10:49:44.371724 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:49:45 crc kubenswrapper[4682]: I1210 10:49:45.773503 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" 
podUID="7076dac7-bf2d-4191-81f5-73b260ff0a75" containerName="oauth-openshift" containerID="cri-o://5f54657b631f251fa77f7b7ab37d5ecf1872197d0b05370eb3811fa27aa966a8" gracePeriod=15 Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.142829 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.342229 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-service-ca\") pod \"7076dac7-bf2d-4191-81f5-73b260ff0a75\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.342295 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-trusted-ca-bundle\") pod \"7076dac7-bf2d-4191-81f5-73b260ff0a75\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.342317 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-ocp-branding-template\") pod \"7076dac7-bf2d-4191-81f5-73b260ff0a75\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.342339 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-login\") pod \"7076dac7-bf2d-4191-81f5-73b260ff0a75\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.342363 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rttvq\" (UniqueName: \"kubernetes.io/projected/7076dac7-bf2d-4191-81f5-73b260ff0a75-kube-api-access-rttvq\") pod \"7076dac7-bf2d-4191-81f5-73b260ff0a75\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.342386 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-provider-selection\") pod \"7076dac7-bf2d-4191-81f5-73b260ff0a75\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.342422 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-router-certs\") pod \"7076dac7-bf2d-4191-81f5-73b260ff0a75\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.342445 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-error\") pod \"7076dac7-bf2d-4191-81f5-73b260ff0a75\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 
10:49:46.342490 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-cliconfig\") pod \"7076dac7-bf2d-4191-81f5-73b260ff0a75\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.342507 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-session\") pod \"7076dac7-bf2d-4191-81f5-73b260ff0a75\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.342531 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-audit-policies\") pod \"7076dac7-bf2d-4191-81f5-73b260ff0a75\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.342558 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7076dac7-bf2d-4191-81f5-73b260ff0a75-audit-dir\") pod \"7076dac7-bf2d-4191-81f5-73b260ff0a75\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.342574 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-serving-cert\") pod \"7076dac7-bf2d-4191-81f5-73b260ff0a75\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.342594 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-idp-0-file-data\") pod \"7076dac7-bf2d-4191-81f5-73b260ff0a75\" (UID: \"7076dac7-bf2d-4191-81f5-73b260ff0a75\") " Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.344178 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "7076dac7-bf2d-4191-81f5-73b260ff0a75" (UID: "7076dac7-bf2d-4191-81f5-73b260ff0a75"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.344778 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "7076dac7-bf2d-4191-81f5-73b260ff0a75" (UID: "7076dac7-bf2d-4191-81f5-73b260ff0a75"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.345640 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7076dac7-bf2d-4191-81f5-73b260ff0a75-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "7076dac7-bf2d-4191-81f5-73b260ff0a75" (UID: "7076dac7-bf2d-4191-81f5-73b260ff0a75"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.348674 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "7076dac7-bf2d-4191-81f5-73b260ff0a75" (UID: "7076dac7-bf2d-4191-81f5-73b260ff0a75"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.349041 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "7076dac7-bf2d-4191-81f5-73b260ff0a75" (UID: "7076dac7-bf2d-4191-81f5-73b260ff0a75"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.349570 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "7076dac7-bf2d-4191-81f5-73b260ff0a75" (UID: "7076dac7-bf2d-4191-81f5-73b260ff0a75"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.350458 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "7076dac7-bf2d-4191-81f5-73b260ff0a75" (UID: "7076dac7-bf2d-4191-81f5-73b260ff0a75"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.350760 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "7076dac7-bf2d-4191-81f5-73b260ff0a75" (UID: "7076dac7-bf2d-4191-81f5-73b260ff0a75"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.350947 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "7076dac7-bf2d-4191-81f5-73b260ff0a75" (UID: "7076dac7-bf2d-4191-81f5-73b260ff0a75"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.351550 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "7076dac7-bf2d-4191-81f5-73b260ff0a75" (UID: "7076dac7-bf2d-4191-81f5-73b260ff0a75"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.351691 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "7076dac7-bf2d-4191-81f5-73b260ff0a75" (UID: "7076dac7-bf2d-4191-81f5-73b260ff0a75"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.352061 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "7076dac7-bf2d-4191-81f5-73b260ff0a75" (UID: "7076dac7-bf2d-4191-81f5-73b260ff0a75"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.352818 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "7076dac7-bf2d-4191-81f5-73b260ff0a75" (UID: "7076dac7-bf2d-4191-81f5-73b260ff0a75"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.364863 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7076dac7-bf2d-4191-81f5-73b260ff0a75-kube-api-access-rttvq" (OuterVolumeSpecName: "kube-api-access-rttvq") pod "7076dac7-bf2d-4191-81f5-73b260ff0a75" (UID: "7076dac7-bf2d-4191-81f5-73b260ff0a75"). InnerVolumeSpecName "kube-api-access-rttvq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.389723 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.392538 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.443940 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.443995 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.444012 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.444026 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.444039 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rttvq\" (UniqueName: \"kubernetes.io/projected/7076dac7-bf2d-4191-81f5-73b260ff0a75-kube-api-access-rttvq\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.444050 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.444059 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.444068 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.444078 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.444098 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.444111 4682 
reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7076dac7-bf2d-4191-81f5-73b260ff0a75-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.444121 4682 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7076dac7-bf2d-4191-81f5-73b260ff0a75-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.444130 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.444141 4682 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7076dac7-bf2d-4191-81f5-73b260ff0a75-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.547247 4682 generic.go:334] "Generic (PLEG): container finished" podID="7076dac7-bf2d-4191-81f5-73b260ff0a75" containerID="5f54657b631f251fa77f7b7ab37d5ecf1872197d0b05370eb3811fa27aa966a8" exitCode=0 Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.547667 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.547548 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" event={"ID":"7076dac7-bf2d-4191-81f5-73b260ff0a75","Type":"ContainerDied","Data":"5f54657b631f251fa77f7b7ab37d5ecf1872197d0b05370eb3811fa27aa966a8"} Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.548209 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4zh9p" event={"ID":"7076dac7-bf2d-4191-81f5-73b260ff0a75","Type":"ContainerDied","Data":"a5baa24c754af8cfa79ca635389855ea90bd2c5c01ed355af500d22d3bf8ffde"} Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.548250 4682 scope.go:117] "RemoveContainer" containerID="5f54657b631f251fa77f7b7ab37d5ecf1872197d0b05370eb3811fa27aa966a8" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.562634 4682 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.596790 4682 scope.go:117] "RemoveContainer" containerID="5f54657b631f251fa77f7b7ab37d5ecf1872197d0b05370eb3811fa27aa966a8" Dec 10 10:49:46 crc kubenswrapper[4682]: E1210 10:49:46.603939 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f54657b631f251fa77f7b7ab37d5ecf1872197d0b05370eb3811fa27aa966a8\": container with ID starting with 5f54657b631f251fa77f7b7ab37d5ecf1872197d0b05370eb3811fa27aa966a8 not found: ID does not exist" containerID="5f54657b631f251fa77f7b7ab37d5ecf1872197d0b05370eb3811fa27aa966a8" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.603977 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f54657b631f251fa77f7b7ab37d5ecf1872197d0b05370eb3811fa27aa966a8"} err="failed to get container status \"5f54657b631f251fa77f7b7ab37d5ecf1872197d0b05370eb3811fa27aa966a8\": rpc error: code = NotFound desc = 
could not find container \"5f54657b631f251fa77f7b7ab37d5ecf1872197d0b05370eb3811fa27aa966a8\": container with ID starting with 5f54657b631f251fa77f7b7ab37d5ecf1872197d0b05370eb3811fa27aa966a8 not found: ID does not exist" Dec 10 10:49:46 crc kubenswrapper[4682]: I1210 10:49:46.661093 4682 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="db9098d2-f583-48aa-a740-6afe1b745aef" Dec 10 10:49:46 crc kubenswrapper[4682]: E1210 10:49:46.869211 4682 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-system-ocp-branding-template\": Failed to watch *v1.Secret: unknown (get secrets)" logger="UnhandledError" Dec 10 10:49:46 crc kubenswrapper[4682]: E1210 10:49:46.907597 4682 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-user-template-provider-selection\": Failed to watch *v1.Secret: unknown (get secrets)" logger="UnhandledError" Dec 10 10:49:47 crc kubenswrapper[4682]: E1210 10:49:47.198488 4682 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-system-cliconfig\": Failed to watch *v1.ConfigMap: unknown (get configmaps)" logger="UnhandledError" Dec 10 10:49:47 crc kubenswrapper[4682]: E1210 10:49:47.208247 4682 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-user-idp-0-file-data\": Failed to watch *v1.Secret: unknown (get secrets)" logger="UnhandledError" Dec 10 10:49:47 crc kubenswrapper[4682]: I1210 10:49:47.553178 4682 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="412547b9-dcab-487e-a6fc-bb7e3fe2b324" Dec 10 10:49:47 crc kubenswrapper[4682]: I1210 10:49:47.553506 4682 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="412547b9-dcab-487e-a6fc-bb7e3fe2b324" Dec 10 10:49:47 crc kubenswrapper[4682]: I1210 10:49:47.555386 4682 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="db9098d2-f583-48aa-a740-6afe1b745aef" Dec 10 10:49:47 crc kubenswrapper[4682]: E1210 10:49:47.831080 4682 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication\"/\"v4-0-config-system-trusted-ca-bundle\": Failed to watch *v1.ConfigMap: unknown (get configmaps)" logger="UnhandledError" Dec 10 10:49:54 crc kubenswrapper[4682]: I1210 10:49:54.375761 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 10:49:56 crc kubenswrapper[4682]: I1210 10:49:56.155077 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 10 10:49:56 crc kubenswrapper[4682]: I1210 10:49:56.410726 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 10 10:49:56 crc kubenswrapper[4682]: I1210 10:49:56.509678 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 10 10:49:56 crc kubenswrapper[4682]: I1210 10:49:56.998026 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 10 10:49:57 crc kubenswrapper[4682]: I1210 
10:49:57.225795 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 10 10:49:57 crc kubenswrapper[4682]: I1210 10:49:57.686392 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Dec 10 10:49:57 crc kubenswrapper[4682]: I1210 10:49:57.834920 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 10 10:49:57 crc kubenswrapper[4682]: I1210 10:49:57.904457 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 10 10:49:58 crc kubenswrapper[4682]: I1210 10:49:58.366256 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 10 10:49:58 crc kubenswrapper[4682]: I1210 10:49:58.395991 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 10 10:49:58 crc kubenswrapper[4682]: I1210 10:49:58.424105 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 10 10:49:58 crc kubenswrapper[4682]: I1210 10:49:58.489298 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Dec 10 10:49:58 crc kubenswrapper[4682]: I1210 10:49:58.523112 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Dec 10 10:49:58 crc kubenswrapper[4682]: I1210 10:49:58.532261 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Dec 10 10:49:58 crc kubenswrapper[4682]: I1210 10:49:58.614164 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 10 10:49:58 crc kubenswrapper[4682]: I1210 10:49:58.626047 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Dec 10 10:49:58 crc kubenswrapper[4682]: I1210 10:49:58.656157 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 10 10:49:58 crc kubenswrapper[4682]: I1210 10:49:58.692838 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Dec 10 10:49:58 crc kubenswrapper[4682]: I1210 10:49:58.795830 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 10 10:49:58 crc kubenswrapper[4682]: I1210 10:49:58.994571 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 10 10:49:59 crc kubenswrapper[4682]: I1210 10:49:59.271839 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Dec 10 10:49:59 crc kubenswrapper[4682]: I1210 10:49:59.394569 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 10 10:49:59 crc kubenswrapper[4682]: I1210 10:49:59.489938 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Dec 10 10:49:59 
crc kubenswrapper[4682]: I1210 10:49:59.551978 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 10 10:49:59 crc kubenswrapper[4682]: I1210 10:49:59.691155 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Dec 10 10:49:59 crc kubenswrapper[4682]: I1210 10:49:59.855339 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 10 10:49:59 crc kubenswrapper[4682]: I1210 10:49:59.874287 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Dec 10 10:49:59 crc kubenswrapper[4682]: I1210 10:49:59.891395 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 10 10:49:59 crc kubenswrapper[4682]: I1210 10:49:59.904534 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Dec 10 10:49:59 crc kubenswrapper[4682]: I1210 10:49:59.956080 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.078303 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.122846 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.190521 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.235528 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.256214 4682 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.269398 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.292524 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.297559 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.309022 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.311568 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.320747 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.370055 4682 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-network-console"/"networking-console-plugin" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.420258 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.489727 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.500456 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.531767 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.572586 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.609838 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.723834 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.734179 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.735665 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.772850 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.833283 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.898559 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.902662 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Dec 10 10:50:00 crc kubenswrapper[4682]: I1210 10:50:00.985739 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.017878 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.071042 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.107744 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.167432 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.196387 4682 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-console-operator"/"kube-root-ca.crt" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.208458 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.247640 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.308899 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.319595 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.392244 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.429418 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.543242 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.589102 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.639831 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.660405 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.667149 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.686160 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.700581 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.711034 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.746287 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.799820 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.811801 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.812398 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.814634 4682 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.825338 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.834376 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.843239 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.862351 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.868810 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.900917 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Dec 10 10:50:01 crc kubenswrapper[4682]: I1210 10:50:01.979380 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Dec 10 10:50:02 crc kubenswrapper[4682]: I1210 10:50:02.006709 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 10 10:50:02 crc kubenswrapper[4682]: I1210 10:50:02.023799 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 10 10:50:02 crc kubenswrapper[4682]: I1210 10:50:02.176959 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 10 10:50:02 crc kubenswrapper[4682]: I1210 10:50:02.259553 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Dec 10 10:50:02 crc kubenswrapper[4682]: I1210 10:50:02.276619 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 10 10:50:02 crc kubenswrapper[4682]: I1210 10:50:02.369746 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 10 10:50:02 crc kubenswrapper[4682]: I1210 10:50:02.468972 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 10 10:50:02 crc kubenswrapper[4682]: I1210 10:50:02.580042 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 10 10:50:02 crc kubenswrapper[4682]: I1210 10:50:02.602833 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Dec 10 10:50:02 crc kubenswrapper[4682]: I1210 10:50:02.620815 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Dec 10 10:50:02 crc kubenswrapper[4682]: I1210 10:50:02.629207 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Dec 10 10:50:02 crc kubenswrapper[4682]: I1210 
10:50:02.684080 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Dec 10 10:50:02 crc kubenswrapper[4682]: I1210 10:50:02.702569 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 10 10:50:02 crc kubenswrapper[4682]: I1210 10:50:02.900511 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Dec 10 10:50:03 crc kubenswrapper[4682]: I1210 10:50:03.019002 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 10 10:50:03 crc kubenswrapper[4682]: I1210 10:50:03.075413 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 10 10:50:03 crc kubenswrapper[4682]: I1210 10:50:03.142618 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Dec 10 10:50:03 crc kubenswrapper[4682]: I1210 10:50:03.187721 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 10 10:50:03 crc kubenswrapper[4682]: I1210 10:50:03.223236 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 10 10:50:03 crc kubenswrapper[4682]: I1210 10:50:03.346054 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 10 10:50:03 crc kubenswrapper[4682]: I1210 10:50:03.357545 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 10 10:50:03 crc kubenswrapper[4682]: I1210 10:50:03.552060 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 10 10:50:03 crc kubenswrapper[4682]: I1210 10:50:03.657704 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Dec 10 10:50:03 crc kubenswrapper[4682]: I1210 10:50:03.664918 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 10 10:50:03 crc kubenswrapper[4682]: I1210 10:50:03.695138 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 10 10:50:03 crc kubenswrapper[4682]: I1210 10:50:03.934780 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.006499 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.011721 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.019068 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.080364 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Dec 10 10:50:04 crc 
kubenswrapper[4682]: I1210 10:50:04.118877 4682 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.175970 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.219346 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.277784 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.406508 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.415026 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.423459 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.497958 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.518752 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.596432 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.629924 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.691905 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.716046 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.752135 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Dec 10 10:50:04 crc kubenswrapper[4682]: I1210 10:50:04.759212 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 10 10:50:05 crc kubenswrapper[4682]: I1210 10:50:04.924917 4682 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Dec 10 10:50:05 crc kubenswrapper[4682]: I1210 10:50:04.934429 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 10 10:50:05 crc kubenswrapper[4682]: I1210 10:50:05.120152 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Dec 10 10:50:05 crc kubenswrapper[4682]: I1210 10:50:05.341681 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 10 10:50:05 crc kubenswrapper[4682]: I1210 10:50:05.370787 4682 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Dec 10 10:50:05 crc kubenswrapper[4682]: I1210 10:50:05.484198 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 10 10:50:05 crc kubenswrapper[4682]: I1210 10:50:05.516148 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 10 10:50:05 crc kubenswrapper[4682]: I1210 10:50:05.703074 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 10 10:50:05 crc kubenswrapper[4682]: I1210 10:50:05.733960 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 10 10:50:05 crc kubenswrapper[4682]: I1210 10:50:05.822887 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 10 10:50:05 crc kubenswrapper[4682]: I1210 10:50:05.824395 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Dec 10 10:50:05 crc kubenswrapper[4682]: I1210 10:50:05.843946 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Dec 10 10:50:05 crc kubenswrapper[4682]: I1210 10:50:05.964262 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Dec 10 10:50:05 crc kubenswrapper[4682]: I1210 10:50:05.991180 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.067882 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.068886 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.219724 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.295916 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.343286 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.344981 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.351306 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.426863 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.446092 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 
10:50:06.487778 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.540693 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.627576 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.714234 4682 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.715149 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.750946 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.799990 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.816865 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.816907 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.836886 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.837575 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.902193 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Dec 10 10:50:06 crc kubenswrapper[4682]: I1210 10:50:06.966241 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.036186 4682 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.063160 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.129177 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.242039 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.271740 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.286789 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 
10:50:07.304778 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.306777 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.329421 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.431178 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.483548 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.515025 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.563615 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.713139 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.878121 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.920069 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.961560 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.970878 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.975581 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Dec 10 10:50:07 crc kubenswrapper[4682]: I1210 10:50:07.989703 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 10 10:50:08 crc kubenswrapper[4682]: I1210 10:50:08.024910 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 10 10:50:08 crc kubenswrapper[4682]: I1210 10:50:08.192500 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Dec 10 10:50:08 crc kubenswrapper[4682]: I1210 10:50:08.261313 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 10 10:50:08 crc kubenswrapper[4682]: I1210 10:50:08.315203 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 10 10:50:08 crc kubenswrapper[4682]: I1210 10:50:08.393053 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 10 10:50:08 crc kubenswrapper[4682]: I1210 10:50:08.525302 4682 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 10 10:50:08 crc kubenswrapper[4682]: I1210 10:50:08.604197 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 10 10:50:08 crc kubenswrapper[4682]: I1210 10:50:08.722339 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Dec 10 10:50:08 crc kubenswrapper[4682]: I1210 10:50:08.771024 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Dec 10 10:50:08 crc kubenswrapper[4682]: I1210 10:50:08.807570 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Dec 10 10:50:08 crc kubenswrapper[4682]: I1210 10:50:08.902931 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 10 10:50:08 crc kubenswrapper[4682]: I1210 10:50:08.905516 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 10 10:50:09 crc kubenswrapper[4682]: I1210 10:50:09.028503 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Dec 10 10:50:09 crc kubenswrapper[4682]: I1210 10:50:09.043725 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Dec 10 10:50:09 crc kubenswrapper[4682]: I1210 10:50:09.090146 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 10 10:50:09 crc kubenswrapper[4682]: I1210 10:50:09.103225 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Dec 10 10:50:09 crc kubenswrapper[4682]: I1210 10:50:09.166427 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Dec 10 10:50:09 crc kubenswrapper[4682]: I1210 10:50:09.209343 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 10 10:50:09 crc kubenswrapper[4682]: I1210 10:50:09.330285 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 10 10:50:09 crc kubenswrapper[4682]: I1210 10:50:09.485685 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Dec 10 10:50:09 crc kubenswrapper[4682]: I1210 10:50:09.685111 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Dec 10 10:50:09 crc kubenswrapper[4682]: I1210 10:50:09.923086 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Dec 10 10:50:10 crc kubenswrapper[4682]: I1210 10:50:10.041168 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 10 10:50:10 crc kubenswrapper[4682]: I1210 10:50:10.051761 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 10 10:50:10 crc 
kubenswrapper[4682]: I1210 10:50:10.281584 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 10 10:50:10 crc kubenswrapper[4682]: I1210 10:50:10.364217 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 10 10:50:10 crc kubenswrapper[4682]: I1210 10:50:10.490465 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 10 10:50:10 crc kubenswrapper[4682]: I1210 10:50:10.496597 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 10 10:50:10 crc kubenswrapper[4682]: I1210 10:50:10.699202 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 10 10:50:10 crc kubenswrapper[4682]: I1210 10:50:10.771564 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Dec 10 10:50:10 crc kubenswrapper[4682]: I1210 10:50:10.816407 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Dec 10 10:50:10 crc kubenswrapper[4682]: I1210 10:50:10.975799 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 10 10:50:10 crc kubenswrapper[4682]: I1210 10:50:10.982393 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 10 10:50:10 crc kubenswrapper[4682]: I1210 10:50:10.991898 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.013144 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.081206 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.155428 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.207814 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.332559 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.357708 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.432763 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.482764 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.545904 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.613681 4682 
reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.618806 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=47.618785869999996 podStartE2EDuration="47.61878587s" podCreationTimestamp="2025-12-10 10:49:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:49:46.686539644 +0000 UTC m=+267.006750404" watchObservedRunningTime="2025-12-10 10:50:11.61878587 +0000 UTC m=+291.938996630" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.619450 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4zh9p","openshift-kube-apiserver/kube-apiserver-crc"] Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.619644 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-5bc77478bd-tff6t"] Dec 10 10:50:11 crc kubenswrapper[4682]: E1210 10:50:11.619935 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d666f615-9508-4824-830c-4b56aec338c0" containerName="installer" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.620027 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="d666f615-9508-4824-830c-4b56aec338c0" containerName="installer" Dec 10 10:50:11 crc kubenswrapper[4682]: E1210 10:50:11.620106 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7076dac7-bf2d-4191-81f5-73b260ff0a75" containerName="oauth-openshift" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.620185 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="7076dac7-bf2d-4191-81f5-73b260ff0a75" containerName="oauth-openshift" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.620097 4682 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="412547b9-dcab-487e-a6fc-bb7e3fe2b324" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.620312 4682 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="412547b9-dcab-487e-a6fc-bb7e3fe2b324" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.620523 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="7076dac7-bf2d-4191-81f5-73b260ff0a75" containerName="oauth-openshift" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.620688 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="d666f615-9508-4824-830c-4b56aec338c0" containerName="installer" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.621248 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.623743 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.623987 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.624422 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.625012 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.625244 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.625421 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.625458 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.627041 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.625638 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.625687 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.626001 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.626084 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.626131 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.627228 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.638613 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.642051 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.649346 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.672176 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=25.672157992 
podStartE2EDuration="25.672157992s" podCreationTimestamp="2025-12-10 10:49:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:50:11.670792068 +0000 UTC m=+291.991002838" watchObservedRunningTime="2025-12-10 10:50:11.672157992 +0000 UTC m=+291.992368742" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.760603 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a93dc38b-c5ec-4631-a338-1a372ae84784-audit-dir\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.760657 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.760679 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-service-ca\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.760699 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-session\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.760721 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.760739 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.760756 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-router-certs\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" 
Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.760783 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.760835 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvwfl\" (UniqueName: \"kubernetes.io/projected/a93dc38b-c5ec-4631-a338-1a372ae84784-kube-api-access-rvwfl\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.760860 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-user-template-login\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.760876 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a93dc38b-c5ec-4631-a338-1a372ae84784-audit-policies\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.760900 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.760929 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-user-template-error\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.760947 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.862679 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-serving-cert\") pod 
\"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.862738 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-router-certs\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.862778 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.862812 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvwfl\" (UniqueName: \"kubernetes.io/projected/a93dc38b-c5ec-4631-a338-1a372ae84784-kube-api-access-rvwfl\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.862842 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-user-template-login\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.862869 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a93dc38b-c5ec-4631-a338-1a372ae84784-audit-policies\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.862901 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.862945 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-user-template-error\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.862969 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-user-template-provider-selection\") pod 
\"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.863000 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a93dc38b-c5ec-4631-a338-1a372ae84784-audit-dir\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.863031 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.863056 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-service-ca\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.863078 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-session\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.863103 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.864624 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.865023 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a93dc38b-c5ec-4631-a338-1a372ae84784-audit-policies\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.865097 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a93dc38b-c5ec-4631-a338-1a372ae84784-audit-dir\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " 
pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.865610 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-service-ca\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.868975 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-user-template-error\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.869424 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.869761 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.869804 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.869989 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-router-certs\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.870077 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-session\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.870354 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-user-template-login\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " 
pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.871885 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.873299 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a93dc38b-c5ec-4631-a338-1a372ae84784-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.887330 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvwfl\" (UniqueName: \"kubernetes.io/projected/a93dc38b-c5ec-4631-a338-1a372ae84784-kube-api-access-rvwfl\") pod \"oauth-openshift-5bc77478bd-tff6t\" (UID: \"a93dc38b-c5ec-4631-a338-1a372ae84784\") " pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.928188 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 10 10:50:11 crc kubenswrapper[4682]: I1210 10:50:11.945122 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:12 crc kubenswrapper[4682]: I1210 10:50:12.341011 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5bc77478bd-tff6t"] Dec 10 10:50:12 crc kubenswrapper[4682]: I1210 10:50:12.369625 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 10 10:50:12 crc kubenswrapper[4682]: I1210 10:50:12.388423 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7076dac7-bf2d-4191-81f5-73b260ff0a75" path="/var/lib/kubelet/pods/7076dac7-bf2d-4191-81f5-73b260ff0a75/volumes" Dec 10 10:50:12 crc kubenswrapper[4682]: I1210 10:50:12.622461 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 10 10:50:12 crc kubenswrapper[4682]: I1210 10:50:12.693727 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" event={"ID":"a93dc38b-c5ec-4631-a338-1a372ae84784","Type":"ContainerStarted","Data":"a8538a9e3d800467dc9d5d2fc40a3501c5e70d2cf3ce876c5dc5f990dbdef03b"} Dec 10 10:50:12 crc kubenswrapper[4682]: I1210 10:50:12.693760 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" event={"ID":"a93dc38b-c5ec-4631-a338-1a372ae84784","Type":"ContainerStarted","Data":"c5e420e6d6842e82319d26a6422b17a57496e87c219fda63b80ba7964b44496d"} Dec 10 10:50:12 crc kubenswrapper[4682]: I1210 10:50:12.695347 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:12 crc kubenswrapper[4682]: I1210 10:50:12.824263 4682 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Dec 10 10:50:13 crc kubenswrapper[4682]: I1210 10:50:13.104581 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" Dec 10 10:50:13 crc kubenswrapper[4682]: I1210 10:50:13.125080 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-5bc77478bd-tff6t" podStartSLOduration=53.125063934 podStartE2EDuration="53.125063934s" podCreationTimestamp="2025-12-10 10:49:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:50:12.719442002 +0000 UTC m=+293.039652762" watchObservedRunningTime="2025-12-10 10:50:13.125063934 +0000 UTC m=+293.445274684" Dec 10 10:50:14 crc kubenswrapper[4682]: I1210 10:50:14.345969 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Dec 10 10:50:19 crc kubenswrapper[4682]: I1210 10:50:19.338726 4682 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 10 10:50:19 crc kubenswrapper[4682]: I1210 10:50:19.339840 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://80094a08269d3da63553a40e52f8bfbd850fdf81b78fb4c5c43b843b2109ca42" gracePeriod=5 Dec 10 10:50:24 crc kubenswrapper[4682]: I1210 10:50:24.764016 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 10 10:50:24 crc kubenswrapper[4682]: I1210 10:50:24.764357 4682 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="80094a08269d3da63553a40e52f8bfbd850fdf81b78fb4c5c43b843b2109ca42" exitCode=137 Dec 10 10:50:24 crc kubenswrapper[4682]: I1210 10:50:24.901684 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 10 10:50:24 crc kubenswrapper[4682]: I1210 10:50:24.901752 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.036279 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.036623 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.036642 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.036400 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.036703 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.036762 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.036982 4682 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.037037 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.037068 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.037092 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.043989 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.137946 4682 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.137989 4682 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.138004 4682 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.138016 4682 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.773195 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.773267 4682 scope.go:117] "RemoveContainer" containerID="80094a08269d3da63553a40e52f8bfbd850fdf81b78fb4c5c43b843b2109ca42" Dec 10 10:50:25 crc kubenswrapper[4682]: I1210 10:50:25.773387 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 10:50:26 crc kubenswrapper[4682]: I1210 10:50:26.386705 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Dec 10 10:50:26 crc kubenswrapper[4682]: I1210 10:50:26.386933 4682 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Dec 10 10:50:26 crc kubenswrapper[4682]: I1210 10:50:26.394638 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 10 10:50:26 crc kubenswrapper[4682]: I1210 10:50:26.394672 4682 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="d21c64dd-c505-4ac6-8edb-25d3d967f83e" Dec 10 10:50:26 crc kubenswrapper[4682]: I1210 10:50:26.397534 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 10 10:50:26 crc kubenswrapper[4682]: I1210 10:50:26.397567 4682 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="d21c64dd-c505-4ac6-8edb-25d3d967f83e" Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.484584 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gw5v5"] Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.484818 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" podUID="97d3493e-8719-4556-bb3c-b2cfd0d39f0f" containerName="controller-manager" containerID="cri-o://40128a0d5187a5eaa67019a8a7c9bfa5be585bb9b9733638d6d77c94fcb4533f" gracePeriod=30 Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.571926 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx"] Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.572609 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" podUID="978faa75-5ae1-484d-9ad6-9fc04bb7e1ea" containerName="route-controller-manager" containerID="cri-o://f2a27b7ec101116308af764944b6e6436f059c77bb63da50077bf1ffb6894c8c" gracePeriod=30 Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.801682 4682 generic.go:334] "Generic (PLEG): container finished" podID="97d3493e-8719-4556-bb3c-b2cfd0d39f0f" containerID="40128a0d5187a5eaa67019a8a7c9bfa5be585bb9b9733638d6d77c94fcb4533f" exitCode=0 Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.801808 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" event={"ID":"97d3493e-8719-4556-bb3c-b2cfd0d39f0f","Type":"ContainerDied","Data":"40128a0d5187a5eaa67019a8a7c9bfa5be585bb9b9733638d6d77c94fcb4533f"} Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.801878 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" event={"ID":"97d3493e-8719-4556-bb3c-b2cfd0d39f0f","Type":"ContainerDied","Data":"f98537cbe3cfbba1a2898ff7c76aca850852f4a2c50ccd494e39db3283b40da5"} Dec 10 10:50:27 crc 
kubenswrapper[4682]: I1210 10:50:27.801897 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f98537cbe3cfbba1a2898ff7c76aca850852f4a2c50ccd494e39db3283b40da5" Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.805715 4682 generic.go:334] "Generic (PLEG): container finished" podID="978faa75-5ae1-484d-9ad6-9fc04bb7e1ea" containerID="f2a27b7ec101116308af764944b6e6436f059c77bb63da50077bf1ffb6894c8c" exitCode=0 Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.805777 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" event={"ID":"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea","Type":"ContainerDied","Data":"f2a27b7ec101116308af764944b6e6436f059c77bb63da50077bf1ffb6894c8c"} Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.817023 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.954836 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.998176 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-client-ca\") pod \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.998376 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szmc5\" (UniqueName: \"kubernetes.io/projected/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-kube-api-access-szmc5\") pod \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.998527 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-proxy-ca-bundles\") pod \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.998607 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-config\") pod \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " Dec 10 10:50:27 crc kubenswrapper[4682]: I1210 10:50:27.998657 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-serving-cert\") pod \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\" (UID: \"97d3493e-8719-4556-bb3c-b2cfd0d39f0f\") " Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.000926 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-client-ca" (OuterVolumeSpecName: "client-ca") pod "97d3493e-8719-4556-bb3c-b2cfd0d39f0f" (UID: "97d3493e-8719-4556-bb3c-b2cfd0d39f0f"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.001007 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "97d3493e-8719-4556-bb3c-b2cfd0d39f0f" (UID: "97d3493e-8719-4556-bb3c-b2cfd0d39f0f"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.001293 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-config" (OuterVolumeSpecName: "config") pod "97d3493e-8719-4556-bb3c-b2cfd0d39f0f" (UID: "97d3493e-8719-4556-bb3c-b2cfd0d39f0f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.006591 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-kube-api-access-szmc5" (OuterVolumeSpecName: "kube-api-access-szmc5") pod "97d3493e-8719-4556-bb3c-b2cfd0d39f0f" (UID: "97d3493e-8719-4556-bb3c-b2cfd0d39f0f"). InnerVolumeSpecName "kube-api-access-szmc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.006653 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "97d3493e-8719-4556-bb3c-b2cfd0d39f0f" (UID: "97d3493e-8719-4556-bb3c-b2cfd0d39f0f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.100510 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-serving-cert\") pod \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.100580 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bv6lc\" (UniqueName: \"kubernetes.io/projected/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-kube-api-access-bv6lc\") pod \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.100642 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-config\") pod \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.100664 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-client-ca\") pod \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\" (UID: \"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea\") " Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.100992 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.101017 4682 
reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.101032 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szmc5\" (UniqueName: \"kubernetes.io/projected/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-kube-api-access-szmc5\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.101046 4682 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.101057 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97d3493e-8719-4556-bb3c-b2cfd0d39f0f-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.102046 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-client-ca" (OuterVolumeSpecName: "client-ca") pod "978faa75-5ae1-484d-9ad6-9fc04bb7e1ea" (UID: "978faa75-5ae1-484d-9ad6-9fc04bb7e1ea"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.103030 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-config" (OuterVolumeSpecName: "config") pod "978faa75-5ae1-484d-9ad6-9fc04bb7e1ea" (UID: "978faa75-5ae1-484d-9ad6-9fc04bb7e1ea"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.105515 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "978faa75-5ae1-484d-9ad6-9fc04bb7e1ea" (UID: "978faa75-5ae1-484d-9ad6-9fc04bb7e1ea"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.105541 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-kube-api-access-bv6lc" (OuterVolumeSpecName: "kube-api-access-bv6lc") pod "978faa75-5ae1-484d-9ad6-9fc04bb7e1ea" (UID: "978faa75-5ae1-484d-9ad6-9fc04bb7e1ea"). InnerVolumeSpecName "kube-api-access-bv6lc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.202448 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.202499 4682 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.202509 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.202521 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bv6lc\" (UniqueName: \"kubernetes.io/projected/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea-kube-api-access-bv6lc\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.257586 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-555699bcb5-czlxh"] Dec 10 10:50:28 crc kubenswrapper[4682]: E1210 10:50:28.257867 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97d3493e-8719-4556-bb3c-b2cfd0d39f0f" containerName="controller-manager" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.257894 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="97d3493e-8719-4556-bb3c-b2cfd0d39f0f" containerName="controller-manager" Dec 10 10:50:28 crc kubenswrapper[4682]: E1210 10:50:28.257916 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="978faa75-5ae1-484d-9ad6-9fc04bb7e1ea" containerName="route-controller-manager" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.257924 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="978faa75-5ae1-484d-9ad6-9fc04bb7e1ea" containerName="route-controller-manager" Dec 10 10:50:28 crc kubenswrapper[4682]: E1210 10:50:28.257935 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.257942 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.258045 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.258063 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="97d3493e-8719-4556-bb3c-b2cfd0d39f0f" containerName="controller-manager" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.258077 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="978faa75-5ae1-484d-9ad6-9fc04bb7e1ea" containerName="route-controller-manager" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.258684 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.278330 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-555699bcb5-czlxh"] Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.334559 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-55448fc787-g927h"] Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.335346 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.352206 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-55448fc787-g927h"] Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.404348 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7k8qz\" (UniqueName: \"kubernetes.io/projected/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-kube-api-access-7k8qz\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.404398 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-client-ca\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.405302 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-serving-cert\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.405345 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-config\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.405437 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-proxy-ca-bundles\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.506944 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7k8qz\" (UniqueName: \"kubernetes.io/projected/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-kube-api-access-7k8qz\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc 
kubenswrapper[4682]: I1210 10:50:28.507016 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f258d23-9186-49df-a26e-b972113a0aa5-config\") pod \"route-controller-manager-55448fc787-g927h\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.507052 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f258d23-9186-49df-a26e-b972113a0aa5-client-ca\") pod \"route-controller-manager-55448fc787-g927h\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.507108 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-client-ca\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.507168 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f258d23-9186-49df-a26e-b972113a0aa5-serving-cert\") pod \"route-controller-manager-55448fc787-g927h\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.507269 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-serving-cert\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.507314 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2fc8\" (UniqueName: \"kubernetes.io/projected/5f258d23-9186-49df-a26e-b972113a0aa5-kube-api-access-c2fc8\") pod \"route-controller-manager-55448fc787-g927h\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.507349 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-config\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.507418 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-proxy-ca-bundles\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.509174 4682 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-client-ca\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.509998 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-proxy-ca-bundles\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.510052 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-config\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.512271 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-serving-cert\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.522315 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7k8qz\" (UniqueName: \"kubernetes.io/projected/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-kube-api-access-7k8qz\") pod \"controller-manager-555699bcb5-czlxh\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.580886 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.608738 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2fc8\" (UniqueName: \"kubernetes.io/projected/5f258d23-9186-49df-a26e-b972113a0aa5-kube-api-access-c2fc8\") pod \"route-controller-manager-55448fc787-g927h\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.608918 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f258d23-9186-49df-a26e-b972113a0aa5-config\") pod \"route-controller-manager-55448fc787-g927h\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.608987 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f258d23-9186-49df-a26e-b972113a0aa5-client-ca\") pod \"route-controller-manager-55448fc787-g927h\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.609050 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f258d23-9186-49df-a26e-b972113a0aa5-serving-cert\") pod \"route-controller-manager-55448fc787-g927h\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.610667 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f258d23-9186-49df-a26e-b972113a0aa5-config\") pod \"route-controller-manager-55448fc787-g927h\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.610745 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f258d23-9186-49df-a26e-b972113a0aa5-client-ca\") pod \"route-controller-manager-55448fc787-g927h\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.613043 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f258d23-9186-49df-a26e-b972113a0aa5-serving-cert\") pod \"route-controller-manager-55448fc787-g927h\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.628337 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2fc8\" (UniqueName: \"kubernetes.io/projected/5f258d23-9186-49df-a26e-b972113a0aa5-kube-api-access-c2fc8\") pod \"route-controller-manager-55448fc787-g927h\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 
10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.652963 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.790562 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-555699bcb5-czlxh"] Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.815003 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" event={"ID":"978faa75-5ae1-484d-9ad6-9fc04bb7e1ea","Type":"ContainerDied","Data":"7557d8ad9258f019d223a83ec784c9a30437f3372f35c4ad28b9df10fea96482"} Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.815298 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.815328 4682 scope.go:117] "RemoveContainer" containerID="f2a27b7ec101116308af764944b6e6436f059c77bb63da50077bf1ffb6894c8c" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.817886 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-gw5v5" Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.818589 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" event={"ID":"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2","Type":"ContainerStarted","Data":"5e8e318fa41dd679a75940f63a97adf77456c592f83dc31315e7c7825c90805e"} Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.836037 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gw5v5"] Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.839244 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-gw5v5"] Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.850496 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx"] Dec 10 10:50:28 crc kubenswrapper[4682]: I1210 10:50:28.853981 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-mp9vx"] Dec 10 10:50:29 crc kubenswrapper[4682]: I1210 10:50:29.070735 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-55448fc787-g927h"] Dec 10 10:50:29 crc kubenswrapper[4682]: W1210 10:50:29.075849 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f258d23_9186_49df_a26e_b972113a0aa5.slice/crio-a1687e5cc5448377e75c5c97d77d960ba7f9ccf948f277aeaa0698babded54c9 WatchSource:0}: Error finding container a1687e5cc5448377e75c5c97d77d960ba7f9ccf948f277aeaa0698babded54c9: Status 404 returned error can't find the container with id a1687e5cc5448377e75c5c97d77d960ba7f9ccf948f277aeaa0698babded54c9 Dec 10 10:50:29 crc kubenswrapper[4682]: I1210 10:50:29.394948 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-555699bcb5-czlxh"] Dec 10 10:50:29 crc kubenswrapper[4682]: I1210 10:50:29.401523 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-route-controller-manager/route-controller-manager-55448fc787-g927h"] Dec 10 10:50:29 crc kubenswrapper[4682]: I1210 10:50:29.824505 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" event={"ID":"5f258d23-9186-49df-a26e-b972113a0aa5","Type":"ContainerStarted","Data":"34b077e048a2cc8c4367a295374f3c9297b3a506019d803289df39f98c6a6aab"} Dec 10 10:50:29 crc kubenswrapper[4682]: I1210 10:50:29.824807 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" event={"ID":"5f258d23-9186-49df-a26e-b972113a0aa5","Type":"ContainerStarted","Data":"a1687e5cc5448377e75c5c97d77d960ba7f9ccf948f277aeaa0698babded54c9"} Dec 10 10:50:29 crc kubenswrapper[4682]: I1210 10:50:29.824830 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:29 crc kubenswrapper[4682]: I1210 10:50:29.827434 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" event={"ID":"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2","Type":"ContainerStarted","Data":"f0da8ca1bec7de62d60cfa039ce44f6550beb2872f3e898ab37977dff82a884f"} Dec 10 10:50:29 crc kubenswrapper[4682]: I1210 10:50:29.827621 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:29 crc kubenswrapper[4682]: I1210 10:50:29.831947 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:29 crc kubenswrapper[4682]: I1210 10:50:29.832326 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:29 crc kubenswrapper[4682]: I1210 10:50:29.843433 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" podStartSLOduration=1.84341486 podStartE2EDuration="1.84341486s" podCreationTimestamp="2025-12-10 10:50:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:50:29.841803476 +0000 UTC m=+310.162014246" watchObservedRunningTime="2025-12-10 10:50:29.84341486 +0000 UTC m=+310.163625610" Dec 10 10:50:29 crc kubenswrapper[4682]: I1210 10:50:29.897445 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" podStartSLOduration=1.897426552 podStartE2EDuration="1.897426552s" podCreationTimestamp="2025-12-10 10:50:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:50:29.892206761 +0000 UTC m=+310.212417511" watchObservedRunningTime="2025-12-10 10:50:29.897426552 +0000 UTC m=+310.217637292" Dec 10 10:50:30 crc kubenswrapper[4682]: I1210 10:50:30.395393 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="978faa75-5ae1-484d-9ad6-9fc04bb7e1ea" path="/var/lib/kubelet/pods/978faa75-5ae1-484d-9ad6-9fc04bb7e1ea/volumes" Dec 10 10:50:30 crc kubenswrapper[4682]: I1210 10:50:30.397574 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes 
dir" podUID="97d3493e-8719-4556-bb3c-b2cfd0d39f0f" path="/var/lib/kubelet/pods/97d3493e-8719-4556-bb3c-b2cfd0d39f0f/volumes" Dec 10 10:50:30 crc kubenswrapper[4682]: I1210 10:50:30.832955 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" podUID="00e4c0c5-80a0-49c9-8a9d-46c6725f15f2" containerName="controller-manager" containerID="cri-o://f0da8ca1bec7de62d60cfa039ce44f6550beb2872f3e898ab37977dff82a884f" gracePeriod=30 Dec 10 10:50:30 crc kubenswrapper[4682]: I1210 10:50:30.833040 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" podUID="5f258d23-9186-49df-a26e-b972113a0aa5" containerName="route-controller-manager" containerID="cri-o://34b077e048a2cc8c4367a295374f3c9297b3a506019d803289df39f98c6a6aab" gracePeriod=30 Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.323843 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.331148 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.347921 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f258d23-9186-49df-a26e-b972113a0aa5-config\") pod \"5f258d23-9186-49df-a26e-b972113a0aa5\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.347997 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7k8qz\" (UniqueName: \"kubernetes.io/projected/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-kube-api-access-7k8qz\") pod \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.348035 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-serving-cert\") pod \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.348069 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f258d23-9186-49df-a26e-b972113a0aa5-client-ca\") pod \"5f258d23-9186-49df-a26e-b972113a0aa5\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.348100 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-client-ca\") pod \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.348156 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2fc8\" (UniqueName: \"kubernetes.io/projected/5f258d23-9186-49df-a26e-b972113a0aa5-kube-api-access-c2fc8\") pod \"5f258d23-9186-49df-a26e-b972113a0aa5\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.348198 4682 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-config\") pod \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.348251 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-proxy-ca-bundles\") pod \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\" (UID: \"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2\") " Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.348270 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f258d23-9186-49df-a26e-b972113a0aa5-serving-cert\") pod \"5f258d23-9186-49df-a26e-b972113a0aa5\" (UID: \"5f258d23-9186-49df-a26e-b972113a0aa5\") " Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.350926 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f258d23-9186-49df-a26e-b972113a0aa5-client-ca" (OuterVolumeSpecName: "client-ca") pod "5f258d23-9186-49df-a26e-b972113a0aa5" (UID: "5f258d23-9186-49df-a26e-b972113a0aa5"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.351817 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "00e4c0c5-80a0-49c9-8a9d-46c6725f15f2" (UID: "00e4c0c5-80a0-49c9-8a9d-46c6725f15f2"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.351887 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-config" (OuterVolumeSpecName: "config") pod "00e4c0c5-80a0-49c9-8a9d-46c6725f15f2" (UID: "00e4c0c5-80a0-49c9-8a9d-46c6725f15f2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.351929 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-client-ca" (OuterVolumeSpecName: "client-ca") pod "00e4c0c5-80a0-49c9-8a9d-46c6725f15f2" (UID: "00e4c0c5-80a0-49c9-8a9d-46c6725f15f2"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.352283 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f258d23-9186-49df-a26e-b972113a0aa5-config" (OuterVolumeSpecName: "config") pod "5f258d23-9186-49df-a26e-b972113a0aa5" (UID: "5f258d23-9186-49df-a26e-b972113a0aa5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.358350 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g"] Dec 10 10:50:31 crc kubenswrapper[4682]: E1210 10:50:31.358655 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f258d23-9186-49df-a26e-b972113a0aa5" containerName="route-controller-manager" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.358674 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f258d23-9186-49df-a26e-b972113a0aa5" containerName="route-controller-manager" Dec 10 10:50:31 crc kubenswrapper[4682]: E1210 10:50:31.358689 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00e4c0c5-80a0-49c9-8a9d-46c6725f15f2" containerName="controller-manager" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.358700 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="00e4c0c5-80a0-49c9-8a9d-46c6725f15f2" containerName="controller-manager" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.358858 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f258d23-9186-49df-a26e-b972113a0aa5" containerName="route-controller-manager" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.358884 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="00e4c0c5-80a0-49c9-8a9d-46c6725f15f2" containerName="controller-manager" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.359310 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.359684 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-kube-api-access-7k8qz" (OuterVolumeSpecName: "kube-api-access-7k8qz") pod "00e4c0c5-80a0-49c9-8a9d-46c6725f15f2" (UID: "00e4c0c5-80a0-49c9-8a9d-46c6725f15f2"). InnerVolumeSpecName "kube-api-access-7k8qz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.359826 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f258d23-9186-49df-a26e-b972113a0aa5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5f258d23-9186-49df-a26e-b972113a0aa5" (UID: "5f258d23-9186-49df-a26e-b972113a0aa5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.362132 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f258d23-9186-49df-a26e-b972113a0aa5-kube-api-access-c2fc8" (OuterVolumeSpecName: "kube-api-access-c2fc8") pod "5f258d23-9186-49df-a26e-b972113a0aa5" (UID: "5f258d23-9186-49df-a26e-b972113a0aa5"). InnerVolumeSpecName "kube-api-access-c2fc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.366747 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "00e4c0c5-80a0-49c9-8a9d-46c6725f15f2" (UID: "00e4c0c5-80a0-49c9-8a9d-46c6725f15f2"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.374709 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g"] Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.449525 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-client-ca\") pod \"route-controller-manager-db6968548-dsc6g\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.449614 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75j8m\" (UniqueName: \"kubernetes.io/projected/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-kube-api-access-75j8m\") pod \"route-controller-manager-db6968548-dsc6g\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.449716 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-config\") pod \"route-controller-manager-db6968548-dsc6g\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.449790 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-serving-cert\") pod \"route-controller-manager-db6968548-dsc6g\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.450081 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f258d23-9186-49df-a26e-b972113a0aa5-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.450105 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f258d23-9186-49df-a26e-b972113a0aa5-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.450119 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7k8qz\" (UniqueName: \"kubernetes.io/projected/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-kube-api-access-7k8qz\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.450132 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.450143 4682 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f258d23-9186-49df-a26e-b972113a0aa5-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.450154 4682 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.450169 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2fc8\" (UniqueName: \"kubernetes.io/projected/5f258d23-9186-49df-a26e-b972113a0aa5-kube-api-access-c2fc8\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.450180 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.450191 4682 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.552780 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-client-ca\") pod \"route-controller-manager-db6968548-dsc6g\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.551652 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-client-ca\") pod \"route-controller-manager-db6968548-dsc6g\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.552898 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75j8m\" (UniqueName: \"kubernetes.io/projected/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-kube-api-access-75j8m\") pod \"route-controller-manager-db6968548-dsc6g\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.552928 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-config\") pod \"route-controller-manager-db6968548-dsc6g\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.553416 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-serving-cert\") pod \"route-controller-manager-db6968548-dsc6g\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.553891 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-config\") pod \"route-controller-manager-db6968548-dsc6g\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.556904 4682 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-serving-cert\") pod \"route-controller-manager-db6968548-dsc6g\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.569064 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75j8m\" (UniqueName: \"kubernetes.io/projected/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-kube-api-access-75j8m\") pod \"route-controller-manager-db6968548-dsc6g\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.697577 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.840978 4682 generic.go:334] "Generic (PLEG): container finished" podID="5f258d23-9186-49df-a26e-b972113a0aa5" containerID="34b077e048a2cc8c4367a295374f3c9297b3a506019d803289df39f98c6a6aab" exitCode=0 Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.841064 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.841106 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" event={"ID":"5f258d23-9186-49df-a26e-b972113a0aa5","Type":"ContainerDied","Data":"34b077e048a2cc8c4367a295374f3c9297b3a506019d803289df39f98c6a6aab"} Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.841819 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-g927h" event={"ID":"5f258d23-9186-49df-a26e-b972113a0aa5","Type":"ContainerDied","Data":"a1687e5cc5448377e75c5c97d77d960ba7f9ccf948f277aeaa0698babded54c9"} Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.841848 4682 scope.go:117] "RemoveContainer" containerID="34b077e048a2cc8c4367a295374f3c9297b3a506019d803289df39f98c6a6aab" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.846340 4682 generic.go:334] "Generic (PLEG): container finished" podID="00e4c0c5-80a0-49c9-8a9d-46c6725f15f2" containerID="f0da8ca1bec7de62d60cfa039ce44f6550beb2872f3e898ab37977dff82a884f" exitCode=0 Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.846396 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.846514 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" event={"ID":"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2","Type":"ContainerDied","Data":"f0da8ca1bec7de62d60cfa039ce44f6550beb2872f3e898ab37977dff82a884f"} Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.846560 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-555699bcb5-czlxh" event={"ID":"00e4c0c5-80a0-49c9-8a9d-46c6725f15f2","Type":"ContainerDied","Data":"5e8e318fa41dd679a75940f63a97adf77456c592f83dc31315e7c7825c90805e"} Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.871061 4682 scope.go:117] "RemoveContainer" containerID="34b077e048a2cc8c4367a295374f3c9297b3a506019d803289df39f98c6a6aab" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.877296 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-55448fc787-g927h"] Dec 10 10:50:31 crc kubenswrapper[4682]: E1210 10:50:31.877655 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34b077e048a2cc8c4367a295374f3c9297b3a506019d803289df39f98c6a6aab\": container with ID starting with 34b077e048a2cc8c4367a295374f3c9297b3a506019d803289df39f98c6a6aab not found: ID does not exist" containerID="34b077e048a2cc8c4367a295374f3c9297b3a506019d803289df39f98c6a6aab" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.877713 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34b077e048a2cc8c4367a295374f3c9297b3a506019d803289df39f98c6a6aab"} err="failed to get container status \"34b077e048a2cc8c4367a295374f3c9297b3a506019d803289df39f98c6a6aab\": rpc error: code = NotFound desc = could not find container \"34b077e048a2cc8c4367a295374f3c9297b3a506019d803289df39f98c6a6aab\": container with ID starting with 34b077e048a2cc8c4367a295374f3c9297b3a506019d803289df39f98c6a6aab not found: ID does not exist" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.877773 4682 scope.go:117] "RemoveContainer" containerID="f0da8ca1bec7de62d60cfa039ce44f6550beb2872f3e898ab37977dff82a884f" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.879848 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-55448fc787-g927h"] Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.894902 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-555699bcb5-czlxh"] Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.901271 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-555699bcb5-czlxh"] Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.904678 4682 scope.go:117] "RemoveContainer" containerID="f0da8ca1bec7de62d60cfa039ce44f6550beb2872f3e898ab37977dff82a884f" Dec 10 10:50:31 crc kubenswrapper[4682]: E1210 10:50:31.905335 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0da8ca1bec7de62d60cfa039ce44f6550beb2872f3e898ab37977dff82a884f\": container with ID starting with f0da8ca1bec7de62d60cfa039ce44f6550beb2872f3e898ab37977dff82a884f not found: ID does not exist" 
containerID="f0da8ca1bec7de62d60cfa039ce44f6550beb2872f3e898ab37977dff82a884f" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.905390 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0da8ca1bec7de62d60cfa039ce44f6550beb2872f3e898ab37977dff82a884f"} err="failed to get container status \"f0da8ca1bec7de62d60cfa039ce44f6550beb2872f3e898ab37977dff82a884f\": rpc error: code = NotFound desc = could not find container \"f0da8ca1bec7de62d60cfa039ce44f6550beb2872f3e898ab37977dff82a884f\": container with ID starting with f0da8ca1bec7de62d60cfa039ce44f6550beb2872f3e898ab37977dff82a884f not found: ID does not exist" Dec 10 10:50:31 crc kubenswrapper[4682]: I1210 10:50:31.956027 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g"] Dec 10 10:50:32 crc kubenswrapper[4682]: I1210 10:50:32.402973 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00e4c0c5-80a0-49c9-8a9d-46c6725f15f2" path="/var/lib/kubelet/pods/00e4c0c5-80a0-49c9-8a9d-46c6725f15f2/volumes" Dec 10 10:50:32 crc kubenswrapper[4682]: I1210 10:50:32.403704 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f258d23-9186-49df-a26e-b972113a0aa5" path="/var/lib/kubelet/pods/5f258d23-9186-49df-a26e-b972113a0aa5/volumes" Dec 10 10:50:32 crc kubenswrapper[4682]: I1210 10:50:32.856511 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" event={"ID":"8fab91b4-eeca-445c-b3f5-9ccd3561aa12","Type":"ContainerStarted","Data":"29ff0d0c9a47f121cf0ccdd6052952e41397fd6ce1da592205defc80ddb1645f"} Dec 10 10:50:32 crc kubenswrapper[4682]: I1210 10:50:32.856550 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" event={"ID":"8fab91b4-eeca-445c-b3f5-9ccd3561aa12","Type":"ContainerStarted","Data":"7047695bc38f8df448b653e3275c11e18b3e7763764bfbe82b99402364f87f14"} Dec 10 10:50:32 crc kubenswrapper[4682]: I1210 10:50:32.857244 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:32 crc kubenswrapper[4682]: I1210 10:50:32.868086 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:50:32 crc kubenswrapper[4682]: I1210 10:50:32.886061 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" podStartSLOduration=3.886040483 podStartE2EDuration="3.886040483s" podCreationTimestamp="2025-12-10 10:50:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:50:32.881322167 +0000 UTC m=+313.201532967" watchObservedRunningTime="2025-12-10 10:50:32.886040483 +0000 UTC m=+313.206251253" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.538893 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl"] Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.539875 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.542449 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.542609 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.544785 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.545137 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.547656 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.552637 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.554580 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.559872 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl"] Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.574949 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-proxy-ca-bundles\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.575233 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-client-ca\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.575344 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b50dcb6-c898-46c8-9770-b5391346d900-serving-cert\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.575503 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjxtz\" (UniqueName: \"kubernetes.io/projected/9b50dcb6-c898-46c8-9770-b5391346d900-kube-api-access-qjxtz\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.575797 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-config\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.676959 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-client-ca\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.677253 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b50dcb6-c898-46c8-9770-b5391346d900-serving-cert\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.677378 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjxtz\" (UniqueName: \"kubernetes.io/projected/9b50dcb6-c898-46c8-9770-b5391346d900-kube-api-access-qjxtz\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.677535 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-config\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.677652 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-proxy-ca-bundles\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.678369 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-client-ca\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.678846 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-proxy-ca-bundles\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.679861 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-config\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" 
Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.684778 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b50dcb6-c898-46c8-9770-b5391346d900-serving-cert\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.697272 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjxtz\" (UniqueName: \"kubernetes.io/projected/9b50dcb6-c898-46c8-9770-b5391346d900-kube-api-access-qjxtz\") pod \"controller-manager-75c57f8b8b-5gqxl\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:33 crc kubenswrapper[4682]: I1210 10:50:33.868800 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:34 crc kubenswrapper[4682]: I1210 10:50:34.052297 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl"] Dec 10 10:50:34 crc kubenswrapper[4682]: W1210 10:50:34.055907 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9b50dcb6_c898_46c8_9770_b5391346d900.slice/crio-d23c5a35c8bdaa347e63a4cc88a6e8b162280b5ba00856a6ba7bb1af2f6dd1e4 WatchSource:0}: Error finding container d23c5a35c8bdaa347e63a4cc88a6e8b162280b5ba00856a6ba7bb1af2f6dd1e4: Status 404 returned error can't find the container with id d23c5a35c8bdaa347e63a4cc88a6e8b162280b5ba00856a6ba7bb1af2f6dd1e4 Dec 10 10:50:34 crc kubenswrapper[4682]: I1210 10:50:34.869494 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" event={"ID":"9b50dcb6-c898-46c8-9770-b5391346d900","Type":"ContainerStarted","Data":"2452089758b3b1ed90f39ea9525501d1441ed246350fc2253cf3d5aac81e0ca8"} Dec 10 10:50:34 crc kubenswrapper[4682]: I1210 10:50:34.869545 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" event={"ID":"9b50dcb6-c898-46c8-9770-b5391346d900","Type":"ContainerStarted","Data":"d23c5a35c8bdaa347e63a4cc88a6e8b162280b5ba00856a6ba7bb1af2f6dd1e4"} Dec 10 10:50:34 crc kubenswrapper[4682]: I1210 10:50:34.869775 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:34 crc kubenswrapper[4682]: I1210 10:50:34.875281 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:34 crc kubenswrapper[4682]: I1210 10:50:34.913926 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" podStartSLOduration=5.913905333 podStartE2EDuration="5.913905333s" podCreationTimestamp="2025-12-10 10:50:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:50:34.890430763 +0000 UTC m=+315.210641523" watchObservedRunningTime="2025-12-10 10:50:34.913905333 +0000 UTC m=+315.234116083" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.146306 4682 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-lqc89"] Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.147514 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.158318 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-lqc89"] Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.319885 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v29gw\" (UniqueName: \"kubernetes.io/projected/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-kube-api-access-v29gw\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.319949 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-registry-tls\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.319971 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-trusted-ca\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.320022 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-ca-trust-extracted\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.320111 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-registry-certificates\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.320130 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-installation-pull-secrets\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.320372 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 
10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.320566 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-bound-sa-token\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.346994 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.422140 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v29gw\" (UniqueName: \"kubernetes.io/projected/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-kube-api-access-v29gw\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.422620 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-registry-tls\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.423513 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-trusted-ca\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.423544 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-ca-trust-extracted\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.423591 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-registry-certificates\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.423622 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-installation-pull-secrets\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.423699 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-bound-sa-token\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.424108 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-ca-trust-extracted\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.424883 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-registry-certificates\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.425014 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-trusted-ca\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.430347 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-registry-tls\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.431969 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-installation-pull-secrets\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.442575 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v29gw\" (UniqueName: \"kubernetes.io/projected/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-kube-api-access-v29gw\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.446995 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e01e4ac5-c5d4-4e11-8659-1ce54d00a471-bound-sa-token\") pod \"image-registry-66df7c8f76-lqc89\" (UID: \"e01e4ac5-c5d4-4e11-8659-1ce54d00a471\") " pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.464275 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.851332 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-lqc89"] Dec 10 10:50:37 crc kubenswrapper[4682]: W1210 10:50:37.858006 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode01e4ac5_c5d4_4e11_8659_1ce54d00a471.slice/crio-d2a294238e2bc19715d63d691641b0febd5516cda1475d6cd7f9ad9559a45ab0 WatchSource:0}: Error finding container d2a294238e2bc19715d63d691641b0febd5516cda1475d6cd7f9ad9559a45ab0: Status 404 returned error can't find the container with id d2a294238e2bc19715d63d691641b0febd5516cda1475d6cd7f9ad9559a45ab0 Dec 10 10:50:37 crc kubenswrapper[4682]: I1210 10:50:37.886193 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" event={"ID":"e01e4ac5-c5d4-4e11-8659-1ce54d00a471","Type":"ContainerStarted","Data":"d2a294238e2bc19715d63d691641b0febd5516cda1475d6cd7f9ad9559a45ab0"} Dec 10 10:50:38 crc kubenswrapper[4682]: I1210 10:50:38.892485 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" event={"ID":"e01e4ac5-c5d4-4e11-8659-1ce54d00a471","Type":"ContainerStarted","Data":"05700713c4f4b59d67597f8da3e20c442dd7e7491ccfcd655a385a0f3ebed95e"} Dec 10 10:50:38 crc kubenswrapper[4682]: I1210 10:50:38.893761 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:47 crc kubenswrapper[4682]: I1210 10:50:47.476002 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" podStartSLOduration=10.475959667 podStartE2EDuration="10.475959667s" podCreationTimestamp="2025-12-10 10:50:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:50:38.908940542 +0000 UTC m=+319.229151302" watchObservedRunningTime="2025-12-10 10:50:47.475959667 +0000 UTC m=+327.796170437" Dec 10 10:50:47 crc kubenswrapper[4682]: I1210 10:50:47.494394 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl"] Dec 10 10:50:47 crc kubenswrapper[4682]: I1210 10:50:47.494671 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" podUID="9b50dcb6-c898-46c8-9770-b5391346d900" containerName="controller-manager" containerID="cri-o://2452089758b3b1ed90f39ea9525501d1441ed246350fc2253cf3d5aac81e0ca8" gracePeriod=30 Dec 10 10:50:47 crc kubenswrapper[4682]: I1210 10:50:47.943533 4682 generic.go:334] "Generic (PLEG): container finished" podID="9b50dcb6-c898-46c8-9770-b5391346d900" containerID="2452089758b3b1ed90f39ea9525501d1441ed246350fc2253cf3d5aac81e0ca8" exitCode=0 Dec 10 10:50:47 crc kubenswrapper[4682]: I1210 10:50:47.943641 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" event={"ID":"9b50dcb6-c898-46c8-9770-b5391346d900","Type":"ContainerDied","Data":"2452089758b3b1ed90f39ea9525501d1441ed246350fc2253cf3d5aac81e0ca8"} Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.086022 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.170445 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-config\") pod \"9b50dcb6-c898-46c8-9770-b5391346d900\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.170530 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b50dcb6-c898-46c8-9770-b5391346d900-serving-cert\") pod \"9b50dcb6-c898-46c8-9770-b5391346d900\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.170579 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qjxtz\" (UniqueName: \"kubernetes.io/projected/9b50dcb6-c898-46c8-9770-b5391346d900-kube-api-access-qjxtz\") pod \"9b50dcb6-c898-46c8-9770-b5391346d900\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.170605 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-client-ca\") pod \"9b50dcb6-c898-46c8-9770-b5391346d900\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.170631 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-proxy-ca-bundles\") pod \"9b50dcb6-c898-46c8-9770-b5391346d900\" (UID: \"9b50dcb6-c898-46c8-9770-b5391346d900\") " Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.171331 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "9b50dcb6-c898-46c8-9770-b5391346d900" (UID: "9b50dcb6-c898-46c8-9770-b5391346d900"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.171359 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-client-ca" (OuterVolumeSpecName: "client-ca") pod "9b50dcb6-c898-46c8-9770-b5391346d900" (UID: "9b50dcb6-c898-46c8-9770-b5391346d900"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.171489 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-config" (OuterVolumeSpecName: "config") pod "9b50dcb6-c898-46c8-9770-b5391346d900" (UID: "9b50dcb6-c898-46c8-9770-b5391346d900"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.178052 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b50dcb6-c898-46c8-9770-b5391346d900-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9b50dcb6-c898-46c8-9770-b5391346d900" (UID: "9b50dcb6-c898-46c8-9770-b5391346d900"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.186158 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b50dcb6-c898-46c8-9770-b5391346d900-kube-api-access-qjxtz" (OuterVolumeSpecName: "kube-api-access-qjxtz") pod "9b50dcb6-c898-46c8-9770-b5391346d900" (UID: "9b50dcb6-c898-46c8-9770-b5391346d900"). InnerVolumeSpecName "kube-api-access-qjxtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.272645 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qjxtz\" (UniqueName: \"kubernetes.io/projected/9b50dcb6-c898-46c8-9770-b5391346d900-kube-api-access-qjxtz\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.272711 4682 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.272728 4682 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.272770 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b50dcb6-c898-46c8-9770-b5391346d900-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.272785 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9b50dcb6-c898-46c8-9770-b5391346d900-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.547699 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-555699bcb5-vcxn5"] Dec 10 10:50:48 crc kubenswrapper[4682]: E1210 10:50:48.547951 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b50dcb6-c898-46c8-9770-b5391346d900" containerName="controller-manager" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.547968 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b50dcb6-c898-46c8-9770-b5391346d900" containerName="controller-manager" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.548083 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b50dcb6-c898-46c8-9770-b5391346d900" containerName="controller-manager" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.548562 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.558126 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-555699bcb5-vcxn5"] Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.678666 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8dcc8981-3da8-4c08-962d-95a852d0db46-client-ca\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.678891 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8dcc8981-3da8-4c08-962d-95a852d0db46-proxy-ca-bundles\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.678979 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r85cq\" (UniqueName: \"kubernetes.io/projected/8dcc8981-3da8-4c08-962d-95a852d0db46-kube-api-access-r85cq\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.679152 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8dcc8981-3da8-4c08-962d-95a852d0db46-serving-cert\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.679316 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8dcc8981-3da8-4c08-962d-95a852d0db46-config\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.781238 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8dcc8981-3da8-4c08-962d-95a852d0db46-proxy-ca-bundles\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.781277 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r85cq\" (UniqueName: \"kubernetes.io/projected/8dcc8981-3da8-4c08-962d-95a852d0db46-kube-api-access-r85cq\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.781316 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/8dcc8981-3da8-4c08-962d-95a852d0db46-serving-cert\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.781354 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8dcc8981-3da8-4c08-962d-95a852d0db46-config\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.781403 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8dcc8981-3da8-4c08-962d-95a852d0db46-client-ca\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.782571 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8dcc8981-3da8-4c08-962d-95a852d0db46-client-ca\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.783456 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8dcc8981-3da8-4c08-962d-95a852d0db46-config\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.783637 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8dcc8981-3da8-4c08-962d-95a852d0db46-proxy-ca-bundles\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.785837 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8dcc8981-3da8-4c08-962d-95a852d0db46-serving-cert\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.799373 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r85cq\" (UniqueName: \"kubernetes.io/projected/8dcc8981-3da8-4c08-962d-95a852d0db46-kube-api-access-r85cq\") pod \"controller-manager-555699bcb5-vcxn5\" (UID: \"8dcc8981-3da8-4c08-962d-95a852d0db46\") " pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.863876 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.953332 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" event={"ID":"9b50dcb6-c898-46c8-9770-b5391346d900","Type":"ContainerDied","Data":"d23c5a35c8bdaa347e63a4cc88a6e8b162280b5ba00856a6ba7bb1af2f6dd1e4"} Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.953405 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.953712 4682 scope.go:117] "RemoveContainer" containerID="2452089758b3b1ed90f39ea9525501d1441ed246350fc2253cf3d5aac81e0ca8" Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.973777 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl"] Dec 10 10:50:48 crc kubenswrapper[4682]: I1210 10:50:48.978749 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-75c57f8b8b-5gqxl"] Dec 10 10:50:49 crc kubenswrapper[4682]: I1210 10:50:49.251206 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-555699bcb5-vcxn5"] Dec 10 10:50:49 crc kubenswrapper[4682]: I1210 10:50:49.960321 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" event={"ID":"8dcc8981-3da8-4c08-962d-95a852d0db46","Type":"ContainerStarted","Data":"377383f59d0e4c0426c92787dbd620b5ddd90c584987aac4d2e58aabd8e45c04"} Dec 10 10:50:49 crc kubenswrapper[4682]: I1210 10:50:49.960697 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:49 crc kubenswrapper[4682]: I1210 10:50:49.960719 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" event={"ID":"8dcc8981-3da8-4c08-962d-95a852d0db46","Type":"ContainerStarted","Data":"ef638763a0bc003efd385c0ee3f96b6de40f0dad25bce681d18046db2c6784bb"} Dec 10 10:50:49 crc kubenswrapper[4682]: I1210 10:50:49.964569 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" Dec 10 10:50:49 crc kubenswrapper[4682]: I1210 10:50:49.979588 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-555699bcb5-vcxn5" podStartSLOduration=2.97956676 podStartE2EDuration="2.97956676s" podCreationTimestamp="2025-12-10 10:50:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:50:49.979067583 +0000 UTC m=+330.299278353" watchObservedRunningTime="2025-12-10 10:50:49.97956676 +0000 UTC m=+330.299777530" Dec 10 10:50:50 crc kubenswrapper[4682]: I1210 10:50:50.387770 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b50dcb6-c898-46c8-9770-b5391346d900" path="/var/lib/kubelet/pods/9b50dcb6-c898-46c8-9770-b5391346d900/volumes" Dec 10 10:50:57 crc kubenswrapper[4682]: I1210 10:50:57.469598 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-lqc89" Dec 10 10:50:57 crc 
kubenswrapper[4682]: I1210 10:50:57.522562 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-mpnmc"] Dec 10 10:51:07 crc kubenswrapper[4682]: I1210 10:51:07.506852 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g"] Dec 10 10:51:07 crc kubenswrapper[4682]: I1210 10:51:07.507499 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" podUID="8fab91b4-eeca-445c-b3f5-9ccd3561aa12" containerName="route-controller-manager" containerID="cri-o://29ff0d0c9a47f121cf0ccdd6052952e41397fd6ce1da592205defc80ddb1645f" gracePeriod=30 Dec 10 10:51:07 crc kubenswrapper[4682]: I1210 10:51:07.984382 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.060604 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-serving-cert\") pod \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.060703 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-client-ca\") pod \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.060730 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75j8m\" (UniqueName: \"kubernetes.io/projected/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-kube-api-access-75j8m\") pod \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.060806 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-config\") pod \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\" (UID: \"8fab91b4-eeca-445c-b3f5-9ccd3561aa12\") " Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.061731 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-config" (OuterVolumeSpecName: "config") pod "8fab91b4-eeca-445c-b3f5-9ccd3561aa12" (UID: "8fab91b4-eeca-445c-b3f5-9ccd3561aa12"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.061762 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-client-ca" (OuterVolumeSpecName: "client-ca") pod "8fab91b4-eeca-445c-b3f5-9ccd3561aa12" (UID: "8fab91b4-eeca-445c-b3f5-9ccd3561aa12"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.066212 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-kube-api-access-75j8m" (OuterVolumeSpecName: "kube-api-access-75j8m") pod "8fab91b4-eeca-445c-b3f5-9ccd3561aa12" (UID: "8fab91b4-eeca-445c-b3f5-9ccd3561aa12"). InnerVolumeSpecName "kube-api-access-75j8m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.067070 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8fab91b4-eeca-445c-b3f5-9ccd3561aa12" (UID: "8fab91b4-eeca-445c-b3f5-9ccd3561aa12"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.067180 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.067093 4682 generic.go:334] "Generic (PLEG): container finished" podID="8fab91b4-eeca-445c-b3f5-9ccd3561aa12" containerID="29ff0d0c9a47f121cf0ccdd6052952e41397fd6ce1da592205defc80ddb1645f" exitCode=0 Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.067262 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" event={"ID":"8fab91b4-eeca-445c-b3f5-9ccd3561aa12","Type":"ContainerDied","Data":"29ff0d0c9a47f121cf0ccdd6052952e41397fd6ce1da592205defc80ddb1645f"} Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.067381 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g" event={"ID":"8fab91b4-eeca-445c-b3f5-9ccd3561aa12","Type":"ContainerDied","Data":"7047695bc38f8df448b653e3275c11e18b3e7763764bfbe82b99402364f87f14"} Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.067452 4682 scope.go:117] "RemoveContainer" containerID="29ff0d0c9a47f121cf0ccdd6052952e41397fd6ce1da592205defc80ddb1645f" Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.114082 4682 scope.go:117] "RemoveContainer" containerID="29ff0d0c9a47f121cf0ccdd6052952e41397fd6ce1da592205defc80ddb1645f" Dec 10 10:51:08 crc kubenswrapper[4682]: E1210 10:51:08.115043 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29ff0d0c9a47f121cf0ccdd6052952e41397fd6ce1da592205defc80ddb1645f\": container with ID starting with 29ff0d0c9a47f121cf0ccdd6052952e41397fd6ce1da592205defc80ddb1645f not found: ID does not exist" containerID="29ff0d0c9a47f121cf0ccdd6052952e41397fd6ce1da592205defc80ddb1645f" Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.115213 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29ff0d0c9a47f121cf0ccdd6052952e41397fd6ce1da592205defc80ddb1645f"} err="failed to get container status \"29ff0d0c9a47f121cf0ccdd6052952e41397fd6ce1da592205defc80ddb1645f\": rpc error: code = NotFound desc = could not find container \"29ff0d0c9a47f121cf0ccdd6052952e41397fd6ce1da592205defc80ddb1645f\": container with ID starting with 29ff0d0c9a47f121cf0ccdd6052952e41397fd6ce1da592205defc80ddb1645f not found: ID does not exist" Dec 10 
10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.134620 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g"] Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.147147 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-db6968548-dsc6g"] Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.161813 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75j8m\" (UniqueName: \"kubernetes.io/projected/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-kube-api-access-75j8m\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.161852 4682 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.161863 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.161872 4682 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fab91b4-eeca-445c-b3f5-9ccd3561aa12-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:08 crc kubenswrapper[4682]: I1210 10:51:08.389010 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fab91b4-eeca-445c-b3f5-9ccd3561aa12" path="/var/lib/kubelet/pods/8fab91b4-eeca-445c-b3f5-9ccd3561aa12/volumes" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.568117 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv"] Dec 10 10:51:09 crc kubenswrapper[4682]: E1210 10:51:09.569752 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fab91b4-eeca-445c-b3f5-9ccd3561aa12" containerName="route-controller-manager" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.569840 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fab91b4-eeca-445c-b3f5-9ccd3561aa12" containerName="route-controller-manager" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.569993 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fab91b4-eeca-445c-b3f5-9ccd3561aa12" containerName="route-controller-manager" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.570487 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.573684 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.574065 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.574328 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.574383 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.574446 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.575263 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.577640 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv"] Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.681131 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxp2j\" (UniqueName: \"kubernetes.io/projected/5315ce24-cff2-4307-bfd4-dde0439b39e8-kube-api-access-zxp2j\") pod \"route-controller-manager-55448fc787-nmpdv\" (UID: \"5315ce24-cff2-4307-bfd4-dde0439b39e8\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.681194 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5315ce24-cff2-4307-bfd4-dde0439b39e8-client-ca\") pod \"route-controller-manager-55448fc787-nmpdv\" (UID: \"5315ce24-cff2-4307-bfd4-dde0439b39e8\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.681249 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5315ce24-cff2-4307-bfd4-dde0439b39e8-config\") pod \"route-controller-manager-55448fc787-nmpdv\" (UID: \"5315ce24-cff2-4307-bfd4-dde0439b39e8\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.681285 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5315ce24-cff2-4307-bfd4-dde0439b39e8-serving-cert\") pod \"route-controller-manager-55448fc787-nmpdv\" (UID: \"5315ce24-cff2-4307-bfd4-dde0439b39e8\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.782453 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxp2j\" (UniqueName: \"kubernetes.io/projected/5315ce24-cff2-4307-bfd4-dde0439b39e8-kube-api-access-zxp2j\") pod 
\"route-controller-manager-55448fc787-nmpdv\" (UID: \"5315ce24-cff2-4307-bfd4-dde0439b39e8\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.782531 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5315ce24-cff2-4307-bfd4-dde0439b39e8-client-ca\") pod \"route-controller-manager-55448fc787-nmpdv\" (UID: \"5315ce24-cff2-4307-bfd4-dde0439b39e8\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.782599 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5315ce24-cff2-4307-bfd4-dde0439b39e8-config\") pod \"route-controller-manager-55448fc787-nmpdv\" (UID: \"5315ce24-cff2-4307-bfd4-dde0439b39e8\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.782645 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5315ce24-cff2-4307-bfd4-dde0439b39e8-serving-cert\") pod \"route-controller-manager-55448fc787-nmpdv\" (UID: \"5315ce24-cff2-4307-bfd4-dde0439b39e8\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.783586 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5315ce24-cff2-4307-bfd4-dde0439b39e8-client-ca\") pod \"route-controller-manager-55448fc787-nmpdv\" (UID: \"5315ce24-cff2-4307-bfd4-dde0439b39e8\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.783737 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5315ce24-cff2-4307-bfd4-dde0439b39e8-config\") pod \"route-controller-manager-55448fc787-nmpdv\" (UID: \"5315ce24-cff2-4307-bfd4-dde0439b39e8\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.788571 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5315ce24-cff2-4307-bfd4-dde0439b39e8-serving-cert\") pod \"route-controller-manager-55448fc787-nmpdv\" (UID: \"5315ce24-cff2-4307-bfd4-dde0439b39e8\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.801298 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxp2j\" (UniqueName: \"kubernetes.io/projected/5315ce24-cff2-4307-bfd4-dde0439b39e8-kube-api-access-zxp2j\") pod \"route-controller-manager-55448fc787-nmpdv\" (UID: \"5315ce24-cff2-4307-bfd4-dde0439b39e8\") " pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:09 crc kubenswrapper[4682]: I1210 10:51:09.894730 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.281282 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv"] Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.757954 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j6tv8"] Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.758553 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-j6tv8" podUID="87412cec-b4af-4f63-a127-4ba4214d57b8" containerName="registry-server" containerID="cri-o://8f0525a2dd2ae201c7299b3546f256fee3ac4b8e1aa4c4b30b16fb3e06052160" gracePeriod=30 Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.769826 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lnqbd"] Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.770664 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lnqbd" podUID="3ee34116-c378-4109-a0a2-e5ea084c98ad" containerName="registry-server" containerID="cri-o://9a8678ea12f8ba4f7e351f44986fcce02eb81b668338871538acd709d7692fc0" gracePeriod=30 Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.778894 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-t9w8x"] Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.779080 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" podUID="c39ff528-9225-4c16-b25d-1b34929dadcb" containerName="marketplace-operator" containerID="cri-o://d382e979d0011c2522f3335a6c2587e2a253fcd79a4c608a7cbcf89815349ca7" gracePeriod=30 Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.783763 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gp62l"] Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.784025 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gp62l" podUID="06e73e24-a522-4e08-98e0-5199a83b016f" containerName="registry-server" containerID="cri-o://c0a5175fd229cf8c9a76d2d8ab7f653c4312747059eadbc9fc1dcf879602ede2" gracePeriod=30 Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.790099 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4jlk2"] Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.790318 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4jlk2" podUID="7ec82d4e-7aac-438d-ada1-ec31302939a7" containerName="registry-server" containerID="cri-o://b74aca3d602de956224fdc13b9befbe10b85daa875b0b2c34a8dde450ef39dff" gracePeriod=30 Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.805077 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-pcvj2"] Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.805806 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.824228 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-pcvj2"] Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.901252 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/27962a48-9d75-4437-bc45-9258a223ebbb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-pcvj2\" (UID: \"27962a48-9d75-4437-bc45-9258a223ebbb\") " pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.901371 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/27962a48-9d75-4437-bc45-9258a223ebbb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-pcvj2\" (UID: \"27962a48-9d75-4437-bc45-9258a223ebbb\") " pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" Dec 10 10:51:10 crc kubenswrapper[4682]: I1210 10:51:10.901525 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj6fn\" (UniqueName: \"kubernetes.io/projected/27962a48-9d75-4437-bc45-9258a223ebbb-kube-api-access-tj6fn\") pod \"marketplace-operator-79b997595-pcvj2\" (UID: \"27962a48-9d75-4437-bc45-9258a223ebbb\") " pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.002393 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/27962a48-9d75-4437-bc45-9258a223ebbb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-pcvj2\" (UID: \"27962a48-9d75-4437-bc45-9258a223ebbb\") " pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.002492 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj6fn\" (UniqueName: \"kubernetes.io/projected/27962a48-9d75-4437-bc45-9258a223ebbb-kube-api-access-tj6fn\") pod \"marketplace-operator-79b997595-pcvj2\" (UID: \"27962a48-9d75-4437-bc45-9258a223ebbb\") " pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.002532 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/27962a48-9d75-4437-bc45-9258a223ebbb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-pcvj2\" (UID: \"27962a48-9d75-4437-bc45-9258a223ebbb\") " pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.003758 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/27962a48-9d75-4437-bc45-9258a223ebbb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-pcvj2\" (UID: \"27962a48-9d75-4437-bc45-9258a223ebbb\") " pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.009411 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/27962a48-9d75-4437-bc45-9258a223ebbb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-pcvj2\" (UID: \"27962a48-9d75-4437-bc45-9258a223ebbb\") " pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.026970 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj6fn\" (UniqueName: \"kubernetes.io/projected/27962a48-9d75-4437-bc45-9258a223ebbb-kube-api-access-tj6fn\") pod \"marketplace-operator-79b997595-pcvj2\" (UID: \"27962a48-9d75-4437-bc45-9258a223ebbb\") " pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.085515 4682 generic.go:334] "Generic (PLEG): container finished" podID="3ee34116-c378-4109-a0a2-e5ea084c98ad" containerID="9a8678ea12f8ba4f7e351f44986fcce02eb81b668338871538acd709d7692fc0" exitCode=0 Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.085615 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lnqbd" event={"ID":"3ee34116-c378-4109-a0a2-e5ea084c98ad","Type":"ContainerDied","Data":"9a8678ea12f8ba4f7e351f44986fcce02eb81b668338871538acd709d7692fc0"} Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.087974 4682 generic.go:334] "Generic (PLEG): container finished" podID="87412cec-b4af-4f63-a127-4ba4214d57b8" containerID="8f0525a2dd2ae201c7299b3546f256fee3ac4b8e1aa4c4b30b16fb3e06052160" exitCode=0 Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.088047 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j6tv8" event={"ID":"87412cec-b4af-4f63-a127-4ba4214d57b8","Type":"ContainerDied","Data":"8f0525a2dd2ae201c7299b3546f256fee3ac4b8e1aa4c4b30b16fb3e06052160"} Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.091343 4682 generic.go:334] "Generic (PLEG): container finished" podID="7ec82d4e-7aac-438d-ada1-ec31302939a7" containerID="b74aca3d602de956224fdc13b9befbe10b85daa875b0b2c34a8dde450ef39dff" exitCode=0 Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.091403 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4jlk2" event={"ID":"7ec82d4e-7aac-438d-ada1-ec31302939a7","Type":"ContainerDied","Data":"b74aca3d602de956224fdc13b9befbe10b85daa875b0b2c34a8dde450ef39dff"} Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.098399 4682 generic.go:334] "Generic (PLEG): container finished" podID="06e73e24-a522-4e08-98e0-5199a83b016f" containerID="c0a5175fd229cf8c9a76d2d8ab7f653c4312747059eadbc9fc1dcf879602ede2" exitCode=0 Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.098495 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gp62l" event={"ID":"06e73e24-a522-4e08-98e0-5199a83b016f","Type":"ContainerDied","Data":"c0a5175fd229cf8c9a76d2d8ab7f653c4312747059eadbc9fc1dcf879602ede2"} Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.099739 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" event={"ID":"5315ce24-cff2-4307-bfd4-dde0439b39e8","Type":"ContainerStarted","Data":"355419ea44ec41d9336f7ace3b533a51ff250bd621539ca99e62a38fcfe3f031"} Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.099765 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" 
event={"ID":"5315ce24-cff2-4307-bfd4-dde0439b39e8","Type":"ContainerStarted","Data":"3e4db1a612e0f0ae95594ca7e21c03c61edc98f0939df2ab4c790a958457cab9"} Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.101194 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.102609 4682 generic.go:334] "Generic (PLEG): container finished" podID="c39ff528-9225-4c16-b25d-1b34929dadcb" containerID="d382e979d0011c2522f3335a6c2587e2a253fcd79a4c608a7cbcf89815349ca7" exitCode=0 Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.102637 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" event={"ID":"c39ff528-9225-4c16-b25d-1b34929dadcb","Type":"ContainerDied","Data":"d382e979d0011c2522f3335a6c2587e2a253fcd79a4c608a7cbcf89815349ca7"} Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.107613 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.122268 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-55448fc787-nmpdv" podStartSLOduration=4.122243382 podStartE2EDuration="4.122243382s" podCreationTimestamp="2025-12-10 10:51:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:51:11.11764676 +0000 UTC m=+351.437857540" watchObservedRunningTime="2025-12-10 10:51:11.122243382 +0000 UTC m=+351.442454132" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.128249 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.320096 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.410033 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7nl5\" (UniqueName: \"kubernetes.io/projected/87412cec-b4af-4f63-a127-4ba4214d57b8-kube-api-access-f7nl5\") pod \"87412cec-b4af-4f63-a127-4ba4214d57b8\" (UID: \"87412cec-b4af-4f63-a127-4ba4214d57b8\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.410151 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87412cec-b4af-4f63-a127-4ba4214d57b8-utilities\") pod \"87412cec-b4af-4f63-a127-4ba4214d57b8\" (UID: \"87412cec-b4af-4f63-a127-4ba4214d57b8\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.410190 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87412cec-b4af-4f63-a127-4ba4214d57b8-catalog-content\") pod \"87412cec-b4af-4f63-a127-4ba4214d57b8\" (UID: \"87412cec-b4af-4f63-a127-4ba4214d57b8\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.418528 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87412cec-b4af-4f63-a127-4ba4214d57b8-utilities" (OuterVolumeSpecName: "utilities") pod "87412cec-b4af-4f63-a127-4ba4214d57b8" (UID: "87412cec-b4af-4f63-a127-4ba4214d57b8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.423247 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87412cec-b4af-4f63-a127-4ba4214d57b8-kube-api-access-f7nl5" (OuterVolumeSpecName: "kube-api-access-f7nl5") pod "87412cec-b4af-4f63-a127-4ba4214d57b8" (UID: "87412cec-b4af-4f63-a127-4ba4214d57b8"). InnerVolumeSpecName "kube-api-access-f7nl5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.467000 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87412cec-b4af-4f63-a127-4ba4214d57b8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "87412cec-b4af-4f63-a127-4ba4214d57b8" (UID: "87412cec-b4af-4f63-a127-4ba4214d57b8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.503528 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.512641 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7nl5\" (UniqueName: \"kubernetes.io/projected/87412cec-b4af-4f63-a127-4ba4214d57b8-kube-api-access-f7nl5\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.512700 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87412cec-b4af-4f63-a127-4ba4214d57b8-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.512715 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87412cec-b4af-4f63-a127-4ba4214d57b8-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.540169 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.545151 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.573582 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.613344 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7ec82d4e-7aac-438d-ada1-ec31302939a7-catalog-content\") pod \"7ec82d4e-7aac-438d-ada1-ec31302939a7\" (UID: \"7ec82d4e-7aac-438d-ada1-ec31302939a7\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.613433 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c39ff528-9225-4c16-b25d-1b34929dadcb-marketplace-operator-metrics\") pod \"c39ff528-9225-4c16-b25d-1b34929dadcb\" (UID: \"c39ff528-9225-4c16-b25d-1b34929dadcb\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.613457 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7ec82d4e-7aac-438d-ada1-ec31302939a7-utilities\") pod \"7ec82d4e-7aac-438d-ada1-ec31302939a7\" (UID: \"7ec82d4e-7aac-438d-ada1-ec31302939a7\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.613528 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jn6mj\" (UniqueName: \"kubernetes.io/projected/c39ff528-9225-4c16-b25d-1b34929dadcb-kube-api-access-jn6mj\") pod \"c39ff528-9225-4c16-b25d-1b34929dadcb\" (UID: \"c39ff528-9225-4c16-b25d-1b34929dadcb\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.613587 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2224j\" (UniqueName: \"kubernetes.io/projected/7ec82d4e-7aac-438d-ada1-ec31302939a7-kube-api-access-2224j\") pod \"7ec82d4e-7aac-438d-ada1-ec31302939a7\" (UID: \"7ec82d4e-7aac-438d-ada1-ec31302939a7\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.613769 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/c39ff528-9225-4c16-b25d-1b34929dadcb-marketplace-trusted-ca\") pod \"c39ff528-9225-4c16-b25d-1b34929dadcb\" (UID: \"c39ff528-9225-4c16-b25d-1b34929dadcb\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.614458 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ec82d4e-7aac-438d-ada1-ec31302939a7-utilities" (OuterVolumeSpecName: "utilities") pod "7ec82d4e-7aac-438d-ada1-ec31302939a7" (UID: "7ec82d4e-7aac-438d-ada1-ec31302939a7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.614552 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c39ff528-9225-4c16-b25d-1b34929dadcb-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "c39ff528-9225-4c16-b25d-1b34929dadcb" (UID: "c39ff528-9225-4c16-b25d-1b34929dadcb"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.616628 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c39ff528-9225-4c16-b25d-1b34929dadcb-kube-api-access-jn6mj" (OuterVolumeSpecName: "kube-api-access-jn6mj") pod "c39ff528-9225-4c16-b25d-1b34929dadcb" (UID: "c39ff528-9225-4c16-b25d-1b34929dadcb"). InnerVolumeSpecName "kube-api-access-jn6mj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.617054 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c39ff528-9225-4c16-b25d-1b34929dadcb-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "c39ff528-9225-4c16-b25d-1b34929dadcb" (UID: "c39ff528-9225-4c16-b25d-1b34929dadcb"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.617174 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ec82d4e-7aac-438d-ada1-ec31302939a7-kube-api-access-2224j" (OuterVolumeSpecName: "kube-api-access-2224j") pod "7ec82d4e-7aac-438d-ada1-ec31302939a7" (UID: "7ec82d4e-7aac-438d-ada1-ec31302939a7"). InnerVolumeSpecName "kube-api-access-2224j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.685201 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-pcvj2"] Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.715312 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ee34116-c378-4109-a0a2-e5ea084c98ad-utilities\") pod \"3ee34116-c378-4109-a0a2-e5ea084c98ad\" (UID: \"3ee34116-c378-4109-a0a2-e5ea084c98ad\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.715380 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06e73e24-a522-4e08-98e0-5199a83b016f-catalog-content\") pod \"06e73e24-a522-4e08-98e0-5199a83b016f\" (UID: \"06e73e24-a522-4e08-98e0-5199a83b016f\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.715432 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e73e24-a522-4e08-98e0-5199a83b016f-utilities\") pod \"06e73e24-a522-4e08-98e0-5199a83b016f\" (UID: \"06e73e24-a522-4e08-98e0-5199a83b016f\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.715459 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqm7j\" (UniqueName: \"kubernetes.io/projected/06e73e24-a522-4e08-98e0-5199a83b016f-kube-api-access-xqm7j\") pod \"06e73e24-a522-4e08-98e0-5199a83b016f\" (UID: \"06e73e24-a522-4e08-98e0-5199a83b016f\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.715514 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22bfm\" (UniqueName: \"kubernetes.io/projected/3ee34116-c378-4109-a0a2-e5ea084c98ad-kube-api-access-22bfm\") pod \"3ee34116-c378-4109-a0a2-e5ea084c98ad\" (UID: \"3ee34116-c378-4109-a0a2-e5ea084c98ad\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.715548 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ee34116-c378-4109-a0a2-e5ea084c98ad-catalog-content\") pod \"3ee34116-c378-4109-a0a2-e5ea084c98ad\" (UID: \"3ee34116-c378-4109-a0a2-e5ea084c98ad\") " Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.715822 4682 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c39ff528-9225-4c16-b25d-1b34929dadcb-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.715843 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7ec82d4e-7aac-438d-ada1-ec31302939a7-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.715855 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jn6mj\" (UniqueName: \"kubernetes.io/projected/c39ff528-9225-4c16-b25d-1b34929dadcb-kube-api-access-jn6mj\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.715866 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2224j\" (UniqueName: \"kubernetes.io/projected/7ec82d4e-7aac-438d-ada1-ec31302939a7-kube-api-access-2224j\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.715878 
4682 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c39ff528-9225-4c16-b25d-1b34929dadcb-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.716585 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06e73e24-a522-4e08-98e0-5199a83b016f-utilities" (OuterVolumeSpecName: "utilities") pod "06e73e24-a522-4e08-98e0-5199a83b016f" (UID: "06e73e24-a522-4e08-98e0-5199a83b016f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.718344 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ee34116-c378-4109-a0a2-e5ea084c98ad-utilities" (OuterVolumeSpecName: "utilities") pod "3ee34116-c378-4109-a0a2-e5ea084c98ad" (UID: "3ee34116-c378-4109-a0a2-e5ea084c98ad"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.718367 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06e73e24-a522-4e08-98e0-5199a83b016f-kube-api-access-xqm7j" (OuterVolumeSpecName: "kube-api-access-xqm7j") pod "06e73e24-a522-4e08-98e0-5199a83b016f" (UID: "06e73e24-a522-4e08-98e0-5199a83b016f"). InnerVolumeSpecName "kube-api-access-xqm7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.718939 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ee34116-c378-4109-a0a2-e5ea084c98ad-kube-api-access-22bfm" (OuterVolumeSpecName: "kube-api-access-22bfm") pod "3ee34116-c378-4109-a0a2-e5ea084c98ad" (UID: "3ee34116-c378-4109-a0a2-e5ea084c98ad"). InnerVolumeSpecName "kube-api-access-22bfm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.739090 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06e73e24-a522-4e08-98e0-5199a83b016f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "06e73e24-a522-4e08-98e0-5199a83b016f" (UID: "06e73e24-a522-4e08-98e0-5199a83b016f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.748990 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ec82d4e-7aac-438d-ada1-ec31302939a7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7ec82d4e-7aac-438d-ada1-ec31302939a7" (UID: "7ec82d4e-7aac-438d-ada1-ec31302939a7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.777013 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ee34116-c378-4109-a0a2-e5ea084c98ad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3ee34116-c378-4109-a0a2-e5ea084c98ad" (UID: "3ee34116-c378-4109-a0a2-e5ea084c98ad"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.817675 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06e73e24-a522-4e08-98e0-5199a83b016f-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.817738 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqm7j\" (UniqueName: \"kubernetes.io/projected/06e73e24-a522-4e08-98e0-5199a83b016f-kube-api-access-xqm7j\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.817756 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22bfm\" (UniqueName: \"kubernetes.io/projected/3ee34116-c378-4109-a0a2-e5ea084c98ad-kube-api-access-22bfm\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.817767 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ee34116-c378-4109-a0a2-e5ea084c98ad-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.817779 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ee34116-c378-4109-a0a2-e5ea084c98ad-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.817790 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06e73e24-a522-4e08-98e0-5199a83b016f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:11 crc kubenswrapper[4682]: I1210 10:51:11.818030 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7ec82d4e-7aac-438d-ada1-ec31302939a7-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.110227 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j6tv8" event={"ID":"87412cec-b4af-4f63-a127-4ba4214d57b8","Type":"ContainerDied","Data":"12c72aee5ae09367ba3b0e9a60b86ee7af7122953ec159c4dc35688ba9962c68"} Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.110619 4682 scope.go:117] "RemoveContainer" containerID="8f0525a2dd2ae201c7299b3546f256fee3ac4b8e1aa4c4b30b16fb3e06052160" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.110763 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j6tv8" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.118728 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4jlk2" event={"ID":"7ec82d4e-7aac-438d-ada1-ec31302939a7","Type":"ContainerDied","Data":"c1c6856bc4df669c562405ad6ee03f0bb4cacc1dc0459b6be5132d3893b11e05"} Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.118832 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4jlk2" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.133062 4682 scope.go:117] "RemoveContainer" containerID="b354cc0581f8c71948c7ab21185285b0138f320450880a189c24ff674f1911c5" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.133116 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gp62l" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.133165 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gp62l" event={"ID":"06e73e24-a522-4e08-98e0-5199a83b016f","Type":"ContainerDied","Data":"299e66b980c5e1ebf6deb53390f7daa597da1ec494e4acdea5a5061e1f891f04"} Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.138071 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" event={"ID":"27962a48-9d75-4437-bc45-9258a223ebbb","Type":"ContainerStarted","Data":"dc0b80f7b03d40518eb750526fec2cd276cf7864c1a9e0e3964d37916fafbff9"} Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.138122 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" event={"ID":"27962a48-9d75-4437-bc45-9258a223ebbb","Type":"ContainerStarted","Data":"77e8a42c7a799a545ac48127566306085001c36ba01267d546bc45ec97c42e16"} Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.139272 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.143708 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.152425 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.152514 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-t9w8x" event={"ID":"c39ff528-9225-4c16-b25d-1b34929dadcb","Type":"ContainerDied","Data":"8c15d96946c9d5e6944459e2840483eed07a0da73a3d4eebf59648589f997ae3"} Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.155146 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lnqbd" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.155580 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lnqbd" event={"ID":"3ee34116-c378-4109-a0a2-e5ea084c98ad","Type":"ContainerDied","Data":"825231ed8ff959b63dd5700ee52ed195d562f8242f050f17d739673f47e8a9fc"} Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.155629 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j6tv8"] Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.162503 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-j6tv8"] Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.163628 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" podStartSLOduration=2.163613702 podStartE2EDuration="2.163613702s" podCreationTimestamp="2025-12-10 10:51:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:51:12.15596947 +0000 UTC m=+352.476180240" watchObservedRunningTime="2025-12-10 10:51:12.163613702 +0000 UTC m=+352.483824452" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.169889 4682 scope.go:117] "RemoveContainer" containerID="9855b10135f5e3382cd0db5865225b80cfab8be62830bc82781bd3eb1e929ad8" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.189133 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4jlk2"] Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.193269 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4jlk2"] Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.216206 4682 scope.go:117] "RemoveContainer" containerID="b74aca3d602de956224fdc13b9befbe10b85daa875b0b2c34a8dde450ef39dff" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.232228 4682 scope.go:117] "RemoveContainer" containerID="d0733ec6cd51d18dd82306854cf92baf4ecf46c65de97d3ff1a89c8a0e2677a9" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.237674 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gp62l"] Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.245788 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gp62l"] Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.250734 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-t9w8x"] Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.255391 4682 scope.go:117] "RemoveContainer" containerID="0fd09a16e86537325ceb33596a176c2d7cf5617f6d69ab5cf0dad23bcdb77797" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.256741 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-t9w8x"] Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.260802 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lnqbd"] Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.262840 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lnqbd"] Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.285834 4682 scope.go:117] "RemoveContainer" 
containerID="c0a5175fd229cf8c9a76d2d8ab7f653c4312747059eadbc9fc1dcf879602ede2" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.299951 4682 scope.go:117] "RemoveContainer" containerID="71a8e4dc29d3f2177022b979577dd11ccadad7720ce226a24c68c32ff777a446" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.315431 4682 scope.go:117] "RemoveContainer" containerID="56d6f6fd46717957850eb0e48e505254bbfbefdfbecc8bad63ffcb4956584678" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.334902 4682 scope.go:117] "RemoveContainer" containerID="d382e979d0011c2522f3335a6c2587e2a253fcd79a4c608a7cbcf89815349ca7" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.349449 4682 scope.go:117] "RemoveContainer" containerID="9a8678ea12f8ba4f7e351f44986fcce02eb81b668338871538acd709d7692fc0" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.368498 4682 scope.go:117] "RemoveContainer" containerID="859ec75704fc1e1044f01a1d231290080b1697a47cd64751bc4355c381910995" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.383142 4682 scope.go:117] "RemoveContainer" containerID="cb6fba4689a476c08eb86f8b4ed19f4aeaf653f3886d562c394650dffcf0caca" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.388167 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06e73e24-a522-4e08-98e0-5199a83b016f" path="/var/lib/kubelet/pods/06e73e24-a522-4e08-98e0-5199a83b016f/volumes" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.389130 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ee34116-c378-4109-a0a2-e5ea084c98ad" path="/var/lib/kubelet/pods/3ee34116-c378-4109-a0a2-e5ea084c98ad/volumes" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.389851 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ec82d4e-7aac-438d-ada1-ec31302939a7" path="/var/lib/kubelet/pods/7ec82d4e-7aac-438d-ada1-ec31302939a7/volumes" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.391228 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87412cec-b4af-4f63-a127-4ba4214d57b8" path="/var/lib/kubelet/pods/87412cec-b4af-4f63-a127-4ba4214d57b8/volumes" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.392060 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c39ff528-9225-4c16-b25d-1b34929dadcb" path="/var/lib/kubelet/pods/c39ff528-9225-4c16-b25d-1b34929dadcb/volumes" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.974787 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4qz5q"] Dec 10 10:51:12 crc kubenswrapper[4682]: E1210 10:51:12.975089 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c39ff528-9225-4c16-b25d-1b34929dadcb" containerName="marketplace-operator" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975110 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c39ff528-9225-4c16-b25d-1b34929dadcb" containerName="marketplace-operator" Dec 10 10:51:12 crc kubenswrapper[4682]: E1210 10:51:12.975130 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06e73e24-a522-4e08-98e0-5199a83b016f" containerName="extract-utilities" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975141 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="06e73e24-a522-4e08-98e0-5199a83b016f" containerName="extract-utilities" Dec 10 10:51:12 crc kubenswrapper[4682]: E1210 10:51:12.975156 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ee34116-c378-4109-a0a2-e5ea084c98ad" 
containerName="registry-server" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975170 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ee34116-c378-4109-a0a2-e5ea084c98ad" containerName="registry-server" Dec 10 10:51:12 crc kubenswrapper[4682]: E1210 10:51:12.975183 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ec82d4e-7aac-438d-ada1-ec31302939a7" containerName="extract-utilities" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975194 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ec82d4e-7aac-438d-ada1-ec31302939a7" containerName="extract-utilities" Dec 10 10:51:12 crc kubenswrapper[4682]: E1210 10:51:12.975207 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ee34116-c378-4109-a0a2-e5ea084c98ad" containerName="extract-content" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975218 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ee34116-c378-4109-a0a2-e5ea084c98ad" containerName="extract-content" Dec 10 10:51:12 crc kubenswrapper[4682]: E1210 10:51:12.975242 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87412cec-b4af-4f63-a127-4ba4214d57b8" containerName="registry-server" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975253 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="87412cec-b4af-4f63-a127-4ba4214d57b8" containerName="registry-server" Dec 10 10:51:12 crc kubenswrapper[4682]: E1210 10:51:12.975269 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87412cec-b4af-4f63-a127-4ba4214d57b8" containerName="extract-content" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975281 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="87412cec-b4af-4f63-a127-4ba4214d57b8" containerName="extract-content" Dec 10 10:51:12 crc kubenswrapper[4682]: E1210 10:51:12.975296 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87412cec-b4af-4f63-a127-4ba4214d57b8" containerName="extract-utilities" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975307 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="87412cec-b4af-4f63-a127-4ba4214d57b8" containerName="extract-utilities" Dec 10 10:51:12 crc kubenswrapper[4682]: E1210 10:51:12.975322 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ee34116-c378-4109-a0a2-e5ea084c98ad" containerName="extract-utilities" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975333 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ee34116-c378-4109-a0a2-e5ea084c98ad" containerName="extract-utilities" Dec 10 10:51:12 crc kubenswrapper[4682]: E1210 10:51:12.975344 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06e73e24-a522-4e08-98e0-5199a83b016f" containerName="registry-server" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975356 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="06e73e24-a522-4e08-98e0-5199a83b016f" containerName="registry-server" Dec 10 10:51:12 crc kubenswrapper[4682]: E1210 10:51:12.975370 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ec82d4e-7aac-438d-ada1-ec31302939a7" containerName="extract-content" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975380 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ec82d4e-7aac-438d-ada1-ec31302939a7" containerName="extract-content" Dec 10 10:51:12 crc kubenswrapper[4682]: E1210 10:51:12.975395 4682 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="7ec82d4e-7aac-438d-ada1-ec31302939a7" containerName="registry-server" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975406 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ec82d4e-7aac-438d-ada1-ec31302939a7" containerName="registry-server" Dec 10 10:51:12 crc kubenswrapper[4682]: E1210 10:51:12.975424 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06e73e24-a522-4e08-98e0-5199a83b016f" containerName="extract-content" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975435 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="06e73e24-a522-4e08-98e0-5199a83b016f" containerName="extract-content" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975596 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c39ff528-9225-4c16-b25d-1b34929dadcb" containerName="marketplace-operator" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975611 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ec82d4e-7aac-438d-ada1-ec31302939a7" containerName="registry-server" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975626 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="06e73e24-a522-4e08-98e0-5199a83b016f" containerName="registry-server" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975637 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="87412cec-b4af-4f63-a127-4ba4214d57b8" containerName="registry-server" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.975665 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ee34116-c378-4109-a0a2-e5ea084c98ad" containerName="registry-server" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.976770 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.979992 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 10 10:51:12 crc kubenswrapper[4682]: I1210 10:51:12.991294 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4qz5q"] Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.149252 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25834669-f151-40f3-8e93-78092435a84e-utilities\") pod \"redhat-marketplace-4qz5q\" (UID: \"25834669-f151-40f3-8e93-78092435a84e\") " pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.149335 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g54bh\" (UniqueName: \"kubernetes.io/projected/25834669-f151-40f3-8e93-78092435a84e-kube-api-access-g54bh\") pod \"redhat-marketplace-4qz5q\" (UID: \"25834669-f151-40f3-8e93-78092435a84e\") " pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.149375 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25834669-f151-40f3-8e93-78092435a84e-catalog-content\") pod \"redhat-marketplace-4qz5q\" (UID: \"25834669-f151-40f3-8e93-78092435a84e\") " pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.176272 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-479t2"] Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.177416 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.181191 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.183431 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-479t2"] Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.250461 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25834669-f151-40f3-8e93-78092435a84e-utilities\") pod \"redhat-marketplace-4qz5q\" (UID: \"25834669-f151-40f3-8e93-78092435a84e\") " pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.250555 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g54bh\" (UniqueName: \"kubernetes.io/projected/25834669-f151-40f3-8e93-78092435a84e-kube-api-access-g54bh\") pod \"redhat-marketplace-4qz5q\" (UID: \"25834669-f151-40f3-8e93-78092435a84e\") " pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.250599 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25834669-f151-40f3-8e93-78092435a84e-catalog-content\") pod \"redhat-marketplace-4qz5q\" (UID: \"25834669-f151-40f3-8e93-78092435a84e\") " pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.251037 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25834669-f151-40f3-8e93-78092435a84e-utilities\") pod \"redhat-marketplace-4qz5q\" (UID: \"25834669-f151-40f3-8e93-78092435a84e\") " pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.251081 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25834669-f151-40f3-8e93-78092435a84e-catalog-content\") pod \"redhat-marketplace-4qz5q\" (UID: \"25834669-f151-40f3-8e93-78092435a84e\") " pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.272058 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g54bh\" (UniqueName: \"kubernetes.io/projected/25834669-f151-40f3-8e93-78092435a84e-kube-api-access-g54bh\") pod \"redhat-marketplace-4qz5q\" (UID: \"25834669-f151-40f3-8e93-78092435a84e\") " pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.310911 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.352299 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-utilities\") pod \"redhat-operators-479t2\" (UID: \"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\") " pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.352382 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-catalog-content\") pod \"redhat-operators-479t2\" (UID: \"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\") " pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.352409 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw6mf\" (UniqueName: \"kubernetes.io/projected/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-kube-api-access-gw6mf\") pod \"redhat-operators-479t2\" (UID: \"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\") " pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.454157 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-catalog-content\") pod \"redhat-operators-479t2\" (UID: \"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\") " pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.454228 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw6mf\" (UniqueName: \"kubernetes.io/projected/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-kube-api-access-gw6mf\") pod \"redhat-operators-479t2\" (UID: \"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\") " pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.454746 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-catalog-content\") pod \"redhat-operators-479t2\" (UID: \"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\") " pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.455990 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-utilities\") pod \"redhat-operators-479t2\" (UID: \"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\") " pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.455689 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-utilities\") pod \"redhat-operators-479t2\" (UID: \"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\") " pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.471101 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gw6mf\" (UniqueName: \"kubernetes.io/projected/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-kube-api-access-gw6mf\") pod \"redhat-operators-479t2\" (UID: 
\"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\") " pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.498755 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.706442 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4qz5q"] Dec 10 10:51:13 crc kubenswrapper[4682]: W1210 10:51:13.713401 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod25834669_f151_40f3_8e93_78092435a84e.slice/crio-17dad845745dbef4b61ba8f56d9db5e162c8f6feb0ad27c653d5aa7bde6e9990 WatchSource:0}: Error finding container 17dad845745dbef4b61ba8f56d9db5e162c8f6feb0ad27c653d5aa7bde6e9990: Status 404 returned error can't find the container with id 17dad845745dbef4b61ba8f56d9db5e162c8f6feb0ad27c653d5aa7bde6e9990 Dec 10 10:51:13 crc kubenswrapper[4682]: I1210 10:51:13.891031 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-479t2"] Dec 10 10:51:14 crc kubenswrapper[4682]: E1210 10:51:14.071451 4682 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda7b64e32_864b_4ea9_b1b1_1aec3c503a8c.slice/crio-909ff250f15e146e0c5387b4282017f9c03d92e6e459ba45e0601f81b6fb6355.scope\": RecentStats: unable to find data in memory cache]" Dec 10 10:51:14 crc kubenswrapper[4682]: I1210 10:51:14.175160 4682 generic.go:334] "Generic (PLEG): container finished" podID="25834669-f151-40f3-8e93-78092435a84e" containerID="0eb335d36c4c69b38c15f3b4c5504e3d1cf6a41c3284c75facd4012088e268bd" exitCode=0 Dec 10 10:51:14 crc kubenswrapper[4682]: I1210 10:51:14.175221 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4qz5q" event={"ID":"25834669-f151-40f3-8e93-78092435a84e","Type":"ContainerDied","Data":"0eb335d36c4c69b38c15f3b4c5504e3d1cf6a41c3284c75facd4012088e268bd"} Dec 10 10:51:14 crc kubenswrapper[4682]: I1210 10:51:14.175513 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4qz5q" event={"ID":"25834669-f151-40f3-8e93-78092435a84e","Type":"ContainerStarted","Data":"17dad845745dbef4b61ba8f56d9db5e162c8f6feb0ad27c653d5aa7bde6e9990"} Dec 10 10:51:14 crc kubenswrapper[4682]: I1210 10:51:14.177954 4682 generic.go:334] "Generic (PLEG): container finished" podID="a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" containerID="909ff250f15e146e0c5387b4282017f9c03d92e6e459ba45e0601f81b6fb6355" exitCode=0 Dec 10 10:51:14 crc kubenswrapper[4682]: I1210 10:51:14.178124 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-479t2" event={"ID":"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c","Type":"ContainerDied","Data":"909ff250f15e146e0c5387b4282017f9c03d92e6e459ba45e0601f81b6fb6355"} Dec 10 10:51:14 crc kubenswrapper[4682]: I1210 10:51:14.178164 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-479t2" event={"ID":"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c","Type":"ContainerStarted","Data":"d7f2ef751cf7defa2453903e710eee3da5e068d120c9ad1dc58f37efc9de0184"} Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.393621 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xd7ms"] Dec 10 
10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.404858 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xd7ms"] Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.405008 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.408284 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.577326 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qqtm6"] Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.578333 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.581188 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.583569 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/170511a5-ad2a-4906-94a0-a712cb687bb9-catalog-content\") pod \"certified-operators-xd7ms\" (UID: \"170511a5-ad2a-4906-94a0-a712cb687bb9\") " pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.583670 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgtqv\" (UniqueName: \"kubernetes.io/projected/170511a5-ad2a-4906-94a0-a712cb687bb9-kube-api-access-mgtqv\") pod \"certified-operators-xd7ms\" (UID: \"170511a5-ad2a-4906-94a0-a712cb687bb9\") " pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.583704 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/170511a5-ad2a-4906-94a0-a712cb687bb9-utilities\") pod \"certified-operators-xd7ms\" (UID: \"170511a5-ad2a-4906-94a0-a712cb687bb9\") " pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.585945 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qqtm6"] Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.685812 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/646faf8c-7ee0-40f1-a240-18d7e8314632-catalog-content\") pod \"community-operators-qqtm6\" (UID: \"646faf8c-7ee0-40f1-a240-18d7e8314632\") " pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.685892 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgtqv\" (UniqueName: \"kubernetes.io/projected/170511a5-ad2a-4906-94a0-a712cb687bb9-kube-api-access-mgtqv\") pod \"certified-operators-xd7ms\" (UID: \"170511a5-ad2a-4906-94a0-a712cb687bb9\") " pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.685920 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/646faf8c-7ee0-40f1-a240-18d7e8314632-utilities\") pod \"community-operators-qqtm6\" (UID: \"646faf8c-7ee0-40f1-a240-18d7e8314632\") " pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.685954 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/170511a5-ad2a-4906-94a0-a712cb687bb9-utilities\") pod \"certified-operators-xd7ms\" (UID: \"170511a5-ad2a-4906-94a0-a712cb687bb9\") " pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.686007 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/170511a5-ad2a-4906-94a0-a712cb687bb9-catalog-content\") pod \"certified-operators-xd7ms\" (UID: \"170511a5-ad2a-4906-94a0-a712cb687bb9\") " pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.686043 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8pft\" (UniqueName: \"kubernetes.io/projected/646faf8c-7ee0-40f1-a240-18d7e8314632-kube-api-access-v8pft\") pod \"community-operators-qqtm6\" (UID: \"646faf8c-7ee0-40f1-a240-18d7e8314632\") " pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.686568 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/170511a5-ad2a-4906-94a0-a712cb687bb9-utilities\") pod \"certified-operators-xd7ms\" (UID: \"170511a5-ad2a-4906-94a0-a712cb687bb9\") " pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.686594 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/170511a5-ad2a-4906-94a0-a712cb687bb9-catalog-content\") pod \"certified-operators-xd7ms\" (UID: \"170511a5-ad2a-4906-94a0-a712cb687bb9\") " pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.707647 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgtqv\" (UniqueName: \"kubernetes.io/projected/170511a5-ad2a-4906-94a0-a712cb687bb9-kube-api-access-mgtqv\") pod \"certified-operators-xd7ms\" (UID: \"170511a5-ad2a-4906-94a0-a712cb687bb9\") " pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.725653 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.786877 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8pft\" (UniqueName: \"kubernetes.io/projected/646faf8c-7ee0-40f1-a240-18d7e8314632-kube-api-access-v8pft\") pod \"community-operators-qqtm6\" (UID: \"646faf8c-7ee0-40f1-a240-18d7e8314632\") " pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.786941 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/646faf8c-7ee0-40f1-a240-18d7e8314632-catalog-content\") pod \"community-operators-qqtm6\" (UID: \"646faf8c-7ee0-40f1-a240-18d7e8314632\") " pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.786987 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/646faf8c-7ee0-40f1-a240-18d7e8314632-utilities\") pod \"community-operators-qqtm6\" (UID: \"646faf8c-7ee0-40f1-a240-18d7e8314632\") " pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.787421 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/646faf8c-7ee0-40f1-a240-18d7e8314632-utilities\") pod \"community-operators-qqtm6\" (UID: \"646faf8c-7ee0-40f1-a240-18d7e8314632\") " pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.790439 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/646faf8c-7ee0-40f1-a240-18d7e8314632-catalog-content\") pod \"community-operators-qqtm6\" (UID: \"646faf8c-7ee0-40f1-a240-18d7e8314632\") " pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.807460 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8pft\" (UniqueName: \"kubernetes.io/projected/646faf8c-7ee0-40f1-a240-18d7e8314632-kube-api-access-v8pft\") pod \"community-operators-qqtm6\" (UID: \"646faf8c-7ee0-40f1-a240-18d7e8314632\") " pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:15 crc kubenswrapper[4682]: I1210 10:51:15.893508 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:16 crc kubenswrapper[4682]: I1210 10:51:16.109274 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xd7ms"] Dec 10 10:51:16 crc kubenswrapper[4682]: W1210 10:51:16.116151 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod170511a5_ad2a_4906_94a0_a712cb687bb9.slice/crio-f0581389064f81ed75b2d1e166f62ce6e37efacb264d24a4dce6be95aa995d1b WatchSource:0}: Error finding container f0581389064f81ed75b2d1e166f62ce6e37efacb264d24a4dce6be95aa995d1b: Status 404 returned error can't find the container with id f0581389064f81ed75b2d1e166f62ce6e37efacb264d24a4dce6be95aa995d1b Dec 10 10:51:16 crc kubenswrapper[4682]: I1210 10:51:16.189482 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xd7ms" event={"ID":"170511a5-ad2a-4906-94a0-a712cb687bb9","Type":"ContainerStarted","Data":"f0581389064f81ed75b2d1e166f62ce6e37efacb264d24a4dce6be95aa995d1b"} Dec 10 10:51:16 crc kubenswrapper[4682]: I1210 10:51:16.191519 4682 generic.go:334] "Generic (PLEG): container finished" podID="25834669-f151-40f3-8e93-78092435a84e" containerID="12b84fc698b927d5bbedf07b1d1f257a6881f5e67867c78e77aad554a8278661" exitCode=0 Dec 10 10:51:16 crc kubenswrapper[4682]: I1210 10:51:16.191592 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4qz5q" event={"ID":"25834669-f151-40f3-8e93-78092435a84e","Type":"ContainerDied","Data":"12b84fc698b927d5bbedf07b1d1f257a6881f5e67867c78e77aad554a8278661"} Dec 10 10:51:16 crc kubenswrapper[4682]: I1210 10:51:16.193682 4682 generic.go:334] "Generic (PLEG): container finished" podID="a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" containerID="cfda238df6e257e2530ca90a85fbae3ab29043cab5afa397f43a51bf75b85f4f" exitCode=0 Dec 10 10:51:16 crc kubenswrapper[4682]: I1210 10:51:16.193756 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-479t2" event={"ID":"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c","Type":"ContainerDied","Data":"cfda238df6e257e2530ca90a85fbae3ab29043cab5afa397f43a51bf75b85f4f"} Dec 10 10:51:16 crc kubenswrapper[4682]: I1210 10:51:16.271933 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qqtm6"] Dec 10 10:51:17 crc kubenswrapper[4682]: I1210 10:51:17.202047 4682 generic.go:334] "Generic (PLEG): container finished" podID="170511a5-ad2a-4906-94a0-a712cb687bb9" containerID="b32e7221752dfb3556a76c60a52146cea04440c074d07e90dcfb7944590af309" exitCode=0 Dec 10 10:51:17 crc kubenswrapper[4682]: I1210 10:51:17.202104 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xd7ms" event={"ID":"170511a5-ad2a-4906-94a0-a712cb687bb9","Type":"ContainerDied","Data":"b32e7221752dfb3556a76c60a52146cea04440c074d07e90dcfb7944590af309"} Dec 10 10:51:17 crc kubenswrapper[4682]: I1210 10:51:17.209319 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-479t2" event={"ID":"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c","Type":"ContainerStarted","Data":"47c6fb96bca99b4776878f3afc716b545aba74845a9555288fd1f84bb4e5340a"} Dec 10 10:51:17 crc kubenswrapper[4682]: I1210 10:51:17.210563 4682 generic.go:334] "Generic (PLEG): container finished" podID="646faf8c-7ee0-40f1-a240-18d7e8314632" 
containerID="9c0f70731bc491a3cc76d224837306879a171e3e4ab92d2a502b91c7d450ad97" exitCode=0 Dec 10 10:51:17 crc kubenswrapper[4682]: I1210 10:51:17.210607 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qqtm6" event={"ID":"646faf8c-7ee0-40f1-a240-18d7e8314632","Type":"ContainerDied","Data":"9c0f70731bc491a3cc76d224837306879a171e3e4ab92d2a502b91c7d450ad97"} Dec 10 10:51:17 crc kubenswrapper[4682]: I1210 10:51:17.210633 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qqtm6" event={"ID":"646faf8c-7ee0-40f1-a240-18d7e8314632","Type":"ContainerStarted","Data":"55b4385158b0f2b657154fce406f15bd473423a64d0e5ef513eaa9540dc8e3e9"} Dec 10 10:51:17 crc kubenswrapper[4682]: I1210 10:51:17.243295 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-479t2" podStartSLOduration=1.635916242 podStartE2EDuration="4.243274487s" podCreationTimestamp="2025-12-10 10:51:13 +0000 UTC" firstStartedPulling="2025-12-10 10:51:14.179244229 +0000 UTC m=+354.499455009" lastFinishedPulling="2025-12-10 10:51:16.786602504 +0000 UTC m=+357.106813254" observedRunningTime="2025-12-10 10:51:17.237445206 +0000 UTC m=+357.557655966" watchObservedRunningTime="2025-12-10 10:51:17.243274487 +0000 UTC m=+357.563485237" Dec 10 10:51:19 crc kubenswrapper[4682]: I1210 10:51:19.224834 4682 generic.go:334] "Generic (PLEG): container finished" podID="170511a5-ad2a-4906-94a0-a712cb687bb9" containerID="e73dddecd7c52dd3e29aa7b52be7a365cdb9e80c08a719b610ebe1bba39f8eb6" exitCode=0 Dec 10 10:51:19 crc kubenswrapper[4682]: I1210 10:51:19.224886 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xd7ms" event={"ID":"170511a5-ad2a-4906-94a0-a712cb687bb9","Type":"ContainerDied","Data":"e73dddecd7c52dd3e29aa7b52be7a365cdb9e80c08a719b610ebe1bba39f8eb6"} Dec 10 10:51:19 crc kubenswrapper[4682]: I1210 10:51:19.228083 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4qz5q" event={"ID":"25834669-f151-40f3-8e93-78092435a84e","Type":"ContainerStarted","Data":"961b0d64fcb16ba8cd1dede14887d3d70679f0bc8c865c87992cd7a7a8e75cd8"} Dec 10 10:51:19 crc kubenswrapper[4682]: I1210 10:51:19.231820 4682 generic.go:334] "Generic (PLEG): container finished" podID="646faf8c-7ee0-40f1-a240-18d7e8314632" containerID="9bf4cbdfd9d161caf416c455c275165da84b448f9a02171105ace540716ab75c" exitCode=0 Dec 10 10:51:19 crc kubenswrapper[4682]: I1210 10:51:19.231855 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qqtm6" event={"ID":"646faf8c-7ee0-40f1-a240-18d7e8314632","Type":"ContainerDied","Data":"9bf4cbdfd9d161caf416c455c275165da84b448f9a02171105ace540716ab75c"} Dec 10 10:51:19 crc kubenswrapper[4682]: I1210 10:51:19.261628 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4qz5q" podStartSLOduration=3.367869456 podStartE2EDuration="7.261606074s" podCreationTimestamp="2025-12-10 10:51:12 +0000 UTC" firstStartedPulling="2025-12-10 10:51:14.176984694 +0000 UTC m=+354.497195444" lastFinishedPulling="2025-12-10 10:51:18.070721312 +0000 UTC m=+358.390932062" observedRunningTime="2025-12-10 10:51:19.255644467 +0000 UTC m=+359.575855227" watchObservedRunningTime="2025-12-10 10:51:19.261606074 +0000 UTC m=+359.581816824" Dec 10 10:51:21 crc kubenswrapper[4682]: I1210 10:51:21.251338 4682 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xd7ms" event={"ID":"170511a5-ad2a-4906-94a0-a712cb687bb9","Type":"ContainerStarted","Data":"5995f0421a1f93627c57d982ba2628c0f43fd1512f2f509ffb88172347b9bbf3"} Dec 10 10:51:21 crc kubenswrapper[4682]: I1210 10:51:21.253136 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qqtm6" event={"ID":"646faf8c-7ee0-40f1-a240-18d7e8314632","Type":"ContainerStarted","Data":"afce3a43a350ccd96138f8ab66a6efd729c143a04673570a028c8c1fa52e2800"} Dec 10 10:51:21 crc kubenswrapper[4682]: I1210 10:51:21.273751 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xd7ms" podStartSLOduration=3.741071953 podStartE2EDuration="6.273735435s" podCreationTimestamp="2025-12-10 10:51:15 +0000 UTC" firstStartedPulling="2025-12-10 10:51:17.204116926 +0000 UTC m=+357.524327676" lastFinishedPulling="2025-12-10 10:51:19.736780408 +0000 UTC m=+360.056991158" observedRunningTime="2025-12-10 10:51:21.272824284 +0000 UTC m=+361.593035044" watchObservedRunningTime="2025-12-10 10:51:21.273735435 +0000 UTC m=+361.593946185" Dec 10 10:51:21 crc kubenswrapper[4682]: I1210 10:51:21.291561 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qqtm6" podStartSLOduration=3.568042916 podStartE2EDuration="6.291542672s" podCreationTimestamp="2025-12-10 10:51:15 +0000 UTC" firstStartedPulling="2025-12-10 10:51:17.211956235 +0000 UTC m=+357.532166985" lastFinishedPulling="2025-12-10 10:51:19.935455991 +0000 UTC m=+360.255666741" observedRunningTime="2025-12-10 10:51:21.288974527 +0000 UTC m=+361.609185277" watchObservedRunningTime="2025-12-10 10:51:21.291542672 +0000 UTC m=+361.611753422" Dec 10 10:51:22 crc kubenswrapper[4682]: I1210 10:51:22.571286 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" podUID="0a575832-6a51-4f80-9c12-346c7d4764f2" containerName="registry" containerID="cri-o://22898a074661b05f90118ce18378435473ec705902cf6ae3f2e3347de54e6db6" gracePeriod=30 Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.264594 4682 generic.go:334] "Generic (PLEG): container finished" podID="0a575832-6a51-4f80-9c12-346c7d4764f2" containerID="22898a074661b05f90118ce18378435473ec705902cf6ae3f2e3347de54e6db6" exitCode=0 Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.264654 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" event={"ID":"0a575832-6a51-4f80-9c12-346c7d4764f2","Type":"ContainerDied","Data":"22898a074661b05f90118ce18378435473ec705902cf6ae3f2e3347de54e6db6"} Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.312046 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.312315 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.360807 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.499501 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-479t2" Dec 10 
10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.499555 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.539822 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.542037 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.688953 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-registry-tls\") pod \"0a575832-6a51-4f80-9c12-346c7d4764f2\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.689054 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0a575832-6a51-4f80-9c12-346c7d4764f2-registry-certificates\") pod \"0a575832-6a51-4f80-9c12-346c7d4764f2\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.689111 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0a575832-6a51-4f80-9c12-346c7d4764f2-installation-pull-secrets\") pod \"0a575832-6a51-4f80-9c12-346c7d4764f2\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.689346 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"0a575832-6a51-4f80-9c12-346c7d4764f2\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.689639 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpvq9\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-kube-api-access-kpvq9\") pod \"0a575832-6a51-4f80-9c12-346c7d4764f2\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.689682 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-bound-sa-token\") pod \"0a575832-6a51-4f80-9c12-346c7d4764f2\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.689765 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0a575832-6a51-4f80-9c12-346c7d4764f2-trusted-ca\") pod \"0a575832-6a51-4f80-9c12-346c7d4764f2\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.689807 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0a575832-6a51-4f80-9c12-346c7d4764f2-ca-trust-extracted\") pod \"0a575832-6a51-4f80-9c12-346c7d4764f2\" (UID: \"0a575832-6a51-4f80-9c12-346c7d4764f2\") " Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.690668 4682 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a575832-6a51-4f80-9c12-346c7d4764f2-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "0a575832-6a51-4f80-9c12-346c7d4764f2" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.690800 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a575832-6a51-4f80-9c12-346c7d4764f2-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "0a575832-6a51-4f80-9c12-346c7d4764f2" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.696244 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "0a575832-6a51-4f80-9c12-346c7d4764f2" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.696656 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a575832-6a51-4f80-9c12-346c7d4764f2-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "0a575832-6a51-4f80-9c12-346c7d4764f2" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.696905 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "0a575832-6a51-4f80-9c12-346c7d4764f2" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.697629 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-kube-api-access-kpvq9" (OuterVolumeSpecName: "kube-api-access-kpvq9") pod "0a575832-6a51-4f80-9c12-346c7d4764f2" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2"). InnerVolumeSpecName "kube-api-access-kpvq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.700024 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "0a575832-6a51-4f80-9c12-346c7d4764f2" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.705495 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a575832-6a51-4f80-9c12-346c7d4764f2-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "0a575832-6a51-4f80-9c12-346c7d4764f2" (UID: "0a575832-6a51-4f80-9c12-346c7d4764f2"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.791405 4682 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0a575832-6a51-4f80-9c12-346c7d4764f2-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.791445 4682 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0a575832-6a51-4f80-9c12-346c7d4764f2-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.791460 4682 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.791502 4682 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0a575832-6a51-4f80-9c12-346c7d4764f2-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.791521 4682 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0a575832-6a51-4f80-9c12-346c7d4764f2-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.791537 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpvq9\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-kube-api-access-kpvq9\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:23 crc kubenswrapper[4682]: I1210 10:51:23.791552 4682 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0a575832-6a51-4f80-9c12-346c7d4764f2-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 10 10:51:24 crc kubenswrapper[4682]: I1210 10:51:24.272659 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" event={"ID":"0a575832-6a51-4f80-9c12-346c7d4764f2","Type":"ContainerDied","Data":"0703cd0b5fa1b628338be4c352f2588b91ffa1198d548c38da55458e58b879bf"} Dec 10 10:51:24 crc kubenswrapper[4682]: I1210 10:51:24.272774 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-mpnmc" Dec 10 10:51:24 crc kubenswrapper[4682]: I1210 10:51:24.273037 4682 scope.go:117] "RemoveContainer" containerID="22898a074661b05f90118ce18378435473ec705902cf6ae3f2e3347de54e6db6" Dec 10 10:51:24 crc kubenswrapper[4682]: I1210 10:51:24.304412 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-mpnmc"] Dec 10 10:51:24 crc kubenswrapper[4682]: I1210 10:51:24.310000 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-mpnmc"] Dec 10 10:51:24 crc kubenswrapper[4682]: I1210 10:51:24.325713 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:51:24 crc kubenswrapper[4682]: I1210 10:51:24.326318 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4qz5q" Dec 10 10:51:24 crc kubenswrapper[4682]: I1210 10:51:24.390309 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a575832-6a51-4f80-9c12-346c7d4764f2" path="/var/lib/kubelet/pods/0a575832-6a51-4f80-9c12-346c7d4764f2/volumes" Dec 10 10:51:25 crc kubenswrapper[4682]: I1210 10:51:25.726330 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:25 crc kubenswrapper[4682]: I1210 10:51:25.726703 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:25 crc kubenswrapper[4682]: I1210 10:51:25.767607 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:25 crc kubenswrapper[4682]: I1210 10:51:25.893777 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:25 crc kubenswrapper[4682]: I1210 10:51:25.894565 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:25 crc kubenswrapper[4682]: I1210 10:51:25.931595 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:26 crc kubenswrapper[4682]: I1210 10:51:26.321384 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 10:51:26 crc kubenswrapper[4682]: I1210 10:51:26.324156 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qqtm6" Dec 10 10:51:36 crc kubenswrapper[4682]: I1210 10:51:36.479363 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:51:36 crc kubenswrapper[4682]: I1210 10:51:36.479881 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:52:06 crc 
kubenswrapper[4682]: I1210 10:52:06.479011 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:52:06 crc kubenswrapper[4682]: I1210 10:52:06.479794 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:52:36 crc kubenswrapper[4682]: I1210 10:52:36.478370 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:52:36 crc kubenswrapper[4682]: I1210 10:52:36.479255 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:52:36 crc kubenswrapper[4682]: I1210 10:52:36.480054 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:52:36 crc kubenswrapper[4682]: I1210 10:52:36.481201 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6e38945e715c1abac31bb88fcaf30353ca7e19cc11c8812056c7fad1c9342ed1"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 10:52:36 crc kubenswrapper[4682]: I1210 10:52:36.481316 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://6e38945e715c1abac31bb88fcaf30353ca7e19cc11c8812056c7fad1c9342ed1" gracePeriod=600 Dec 10 10:52:36 crc kubenswrapper[4682]: I1210 10:52:36.920822 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="6e38945e715c1abac31bb88fcaf30353ca7e19cc11c8812056c7fad1c9342ed1" exitCode=0 Dec 10 10:52:36 crc kubenswrapper[4682]: I1210 10:52:36.920881 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"6e38945e715c1abac31bb88fcaf30353ca7e19cc11c8812056c7fad1c9342ed1"} Dec 10 10:52:36 crc kubenswrapper[4682]: I1210 10:52:36.921248 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"24b3429e3e43a35cd2e6a0d08a5c397cb0299c4d8c6b5f72ac9981458cf65f39"} Dec 10 10:52:36 crc kubenswrapper[4682]: I1210 10:52:36.921271 4682 scope.go:117] "RemoveContainer" 
containerID="57c1739a7d655f32da0d09cb9b928cb8d68962e97f7a7da2f0699ff6fd522a26" Dec 10 10:54:20 crc kubenswrapper[4682]: I1210 10:54:20.622862 4682 scope.go:117] "RemoveContainer" containerID="40128a0d5187a5eaa67019a8a7c9bfa5be585bb9b9733638d6d77c94fcb4533f" Dec 10 10:54:36 crc kubenswrapper[4682]: I1210 10:54:36.479159 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:54:36 crc kubenswrapper[4682]: I1210 10:54:36.479801 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:55:06 crc kubenswrapper[4682]: I1210 10:55:06.478694 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:55:06 crc kubenswrapper[4682]: I1210 10:55:06.479559 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:55:36 crc kubenswrapper[4682]: I1210 10:55:36.478365 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:55:36 crc kubenswrapper[4682]: I1210 10:55:36.479288 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:55:36 crc kubenswrapper[4682]: I1210 10:55:36.479351 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:55:36 crc kubenswrapper[4682]: I1210 10:55:36.480009 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"24b3429e3e43a35cd2e6a0d08a5c397cb0299c4d8c6b5f72ac9981458cf65f39"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 10:55:36 crc kubenswrapper[4682]: I1210 10:55:36.480066 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://24b3429e3e43a35cd2e6a0d08a5c397cb0299c4d8c6b5f72ac9981458cf65f39" gracePeriod=600 Dec 10 10:55:36 crc 
kubenswrapper[4682]: I1210 10:55:36.946019 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="24b3429e3e43a35cd2e6a0d08a5c397cb0299c4d8c6b5f72ac9981458cf65f39" exitCode=0 Dec 10 10:55:36 crc kubenswrapper[4682]: I1210 10:55:36.946135 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"24b3429e3e43a35cd2e6a0d08a5c397cb0299c4d8c6b5f72ac9981458cf65f39"} Dec 10 10:55:36 crc kubenswrapper[4682]: I1210 10:55:36.946369 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"c6bff78a240d5adae318d431b3e181644756793c403e51687d775ce4fb2cfb9a"} Dec 10 10:55:36 crc kubenswrapper[4682]: I1210 10:55:36.946399 4682 scope.go:117] "RemoveContainer" containerID="6e38945e715c1abac31bb88fcaf30353ca7e19cc11c8812056c7fad1c9342ed1" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.435048 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz"] Dec 10 10:57:22 crc kubenswrapper[4682]: E1210 10:57:22.436097 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a575832-6a51-4f80-9c12-346c7d4764f2" containerName="registry" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.436113 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a575832-6a51-4f80-9c12-346c7d4764f2" containerName="registry" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.436236 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a575832-6a51-4f80-9c12-346c7d4764f2" containerName="registry" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.437070 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.439229 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.459345 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz"] Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.491865 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e373320b-0c25-4165-b27a-ff5b889dd9a9-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz\" (UID: \"e373320b-0c25-4165-b27a-ff5b889dd9a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.491915 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54rwx\" (UniqueName: \"kubernetes.io/projected/e373320b-0c25-4165-b27a-ff5b889dd9a9-kube-api-access-54rwx\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz\" (UID: \"e373320b-0c25-4165-b27a-ff5b889dd9a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.492048 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e373320b-0c25-4165-b27a-ff5b889dd9a9-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz\" (UID: \"e373320b-0c25-4165-b27a-ff5b889dd9a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.593159 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e373320b-0c25-4165-b27a-ff5b889dd9a9-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz\" (UID: \"e373320b-0c25-4165-b27a-ff5b889dd9a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.593249 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e373320b-0c25-4165-b27a-ff5b889dd9a9-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz\" (UID: \"e373320b-0c25-4165-b27a-ff5b889dd9a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.593274 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54rwx\" (UniqueName: \"kubernetes.io/projected/e373320b-0c25-4165-b27a-ff5b889dd9a9-kube-api-access-54rwx\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz\" (UID: \"e373320b-0c25-4165-b27a-ff5b889dd9a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.593784 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/e373320b-0c25-4165-b27a-ff5b889dd9a9-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz\" (UID: \"e373320b-0c25-4165-b27a-ff5b889dd9a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.593795 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e373320b-0c25-4165-b27a-ff5b889dd9a9-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz\" (UID: \"e373320b-0c25-4165-b27a-ff5b889dd9a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.620023 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54rwx\" (UniqueName: \"kubernetes.io/projected/e373320b-0c25-4165-b27a-ff5b889dd9a9-kube-api-access-54rwx\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz\" (UID: \"e373320b-0c25-4165-b27a-ff5b889dd9a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.752852 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" Dec 10 10:57:22 crc kubenswrapper[4682]: I1210 10:57:22.928851 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz"] Dec 10 10:57:23 crc kubenswrapper[4682]: I1210 10:57:23.578829 4682 generic.go:334] "Generic (PLEG): container finished" podID="e373320b-0c25-4165-b27a-ff5b889dd9a9" containerID="3bd16da297651c37c6223becaf38fe823f2214c04f510f7cf67640d8f21c7b56" exitCode=0 Dec 10 10:57:23 crc kubenswrapper[4682]: I1210 10:57:23.578880 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" event={"ID":"e373320b-0c25-4165-b27a-ff5b889dd9a9","Type":"ContainerDied","Data":"3bd16da297651c37c6223becaf38fe823f2214c04f510f7cf67640d8f21c7b56"} Dec 10 10:57:23 crc kubenswrapper[4682]: I1210 10:57:23.579129 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" event={"ID":"e373320b-0c25-4165-b27a-ff5b889dd9a9","Type":"ContainerStarted","Data":"2520ff809c82b5c46e880389e8ab56aff6af01ff765420b77a9801d52db0e83d"} Dec 10 10:57:23 crc kubenswrapper[4682]: I1210 10:57:23.580259 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 10:57:25 crc kubenswrapper[4682]: I1210 10:57:25.596312 4682 generic.go:334] "Generic (PLEG): container finished" podID="e373320b-0c25-4165-b27a-ff5b889dd9a9" containerID="be2dcdf7add5a1ee5208a130d713abbfde0420545a82fed8bcc9d410576dbe75" exitCode=0 Dec 10 10:57:25 crc kubenswrapper[4682]: I1210 10:57:25.596421 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" event={"ID":"e373320b-0c25-4165-b27a-ff5b889dd9a9","Type":"ContainerDied","Data":"be2dcdf7add5a1ee5208a130d713abbfde0420545a82fed8bcc9d410576dbe75"} Dec 10 10:57:26 crc kubenswrapper[4682]: I1210 10:57:26.615895 4682 generic.go:334] "Generic (PLEG): container finished" 
podID="e373320b-0c25-4165-b27a-ff5b889dd9a9" containerID="5d32704e9f54e9cbfbc8104b798e5fa709ced53e18ced43b29792ba802ff05cd" exitCode=0 Dec 10 10:57:26 crc kubenswrapper[4682]: I1210 10:57:26.615954 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" event={"ID":"e373320b-0c25-4165-b27a-ff5b889dd9a9","Type":"ContainerDied","Data":"5d32704e9f54e9cbfbc8104b798e5fa709ced53e18ced43b29792ba802ff05cd"} Dec 10 10:57:27 crc kubenswrapper[4682]: I1210 10:57:27.831551 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" Dec 10 10:57:27 crc kubenswrapper[4682]: I1210 10:57:27.956431 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54rwx\" (UniqueName: \"kubernetes.io/projected/e373320b-0c25-4165-b27a-ff5b889dd9a9-kube-api-access-54rwx\") pod \"e373320b-0c25-4165-b27a-ff5b889dd9a9\" (UID: \"e373320b-0c25-4165-b27a-ff5b889dd9a9\") " Dec 10 10:57:27 crc kubenswrapper[4682]: I1210 10:57:27.956595 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e373320b-0c25-4165-b27a-ff5b889dd9a9-util\") pod \"e373320b-0c25-4165-b27a-ff5b889dd9a9\" (UID: \"e373320b-0c25-4165-b27a-ff5b889dd9a9\") " Dec 10 10:57:27 crc kubenswrapper[4682]: I1210 10:57:27.956624 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e373320b-0c25-4165-b27a-ff5b889dd9a9-bundle\") pod \"e373320b-0c25-4165-b27a-ff5b889dd9a9\" (UID: \"e373320b-0c25-4165-b27a-ff5b889dd9a9\") " Dec 10 10:57:27 crc kubenswrapper[4682]: I1210 10:57:27.958997 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e373320b-0c25-4165-b27a-ff5b889dd9a9-bundle" (OuterVolumeSpecName: "bundle") pod "e373320b-0c25-4165-b27a-ff5b889dd9a9" (UID: "e373320b-0c25-4165-b27a-ff5b889dd9a9"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:57:27 crc kubenswrapper[4682]: I1210 10:57:27.964356 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e373320b-0c25-4165-b27a-ff5b889dd9a9-kube-api-access-54rwx" (OuterVolumeSpecName: "kube-api-access-54rwx") pod "e373320b-0c25-4165-b27a-ff5b889dd9a9" (UID: "e373320b-0c25-4165-b27a-ff5b889dd9a9"). InnerVolumeSpecName "kube-api-access-54rwx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:57:27 crc kubenswrapper[4682]: I1210 10:57:27.970572 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e373320b-0c25-4165-b27a-ff5b889dd9a9-util" (OuterVolumeSpecName: "util") pod "e373320b-0c25-4165-b27a-ff5b889dd9a9" (UID: "e373320b-0c25-4165-b27a-ff5b889dd9a9"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:57:28 crc kubenswrapper[4682]: I1210 10:57:28.058191 4682 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e373320b-0c25-4165-b27a-ff5b889dd9a9-util\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:28 crc kubenswrapper[4682]: I1210 10:57:28.058225 4682 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e373320b-0c25-4165-b27a-ff5b889dd9a9-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:28 crc kubenswrapper[4682]: I1210 10:57:28.058234 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54rwx\" (UniqueName: \"kubernetes.io/projected/e373320b-0c25-4165-b27a-ff5b889dd9a9-kube-api-access-54rwx\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:28 crc kubenswrapper[4682]: I1210 10:57:28.627542 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" event={"ID":"e373320b-0c25-4165-b27a-ff5b889dd9a9","Type":"ContainerDied","Data":"2520ff809c82b5c46e880389e8ab56aff6af01ff765420b77a9801d52db0e83d"} Dec 10 10:57:28 crc kubenswrapper[4682]: I1210 10:57:28.627586 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2520ff809c82b5c46e880389e8ab56aff6af01ff765420b77a9801d52db0e83d" Dec 10 10:57:28 crc kubenswrapper[4682]: I1210 10:57:28.627660 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.478911 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-vmhkf"] Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.479959 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovn-controller" containerID="cri-o://ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3" gracePeriod=30 Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.480019 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="nbdb" containerID="cri-o://a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca" gracePeriod=30 Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.480095 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="northd" containerID="cri-o://bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50" gracePeriod=30 Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.480141 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487" gracePeriod=30 Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.480178 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="kube-rbac-proxy-node" 
containerID="cri-o://45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506" gracePeriod=30 Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.480218 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovn-acl-logging" containerID="cri-o://8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33" gracePeriod=30 Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.480390 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="sbdb" containerID="cri-o://115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5" gracePeriod=30 Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.505798 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" containerID="cri-o://fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290" gracePeriod=30 Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.666545 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovnkube-controller/3.log" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.669626 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovn-acl-logging/0.log" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.670116 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovn-controller/0.log" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.670669 4682 generic.go:334] "Generic (PLEG): container finished" podID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerID="fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290" exitCode=0 Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.670706 4682 generic.go:334] "Generic (PLEG): container finished" podID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerID="fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487" exitCode=0 Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.670721 4682 generic.go:334] "Generic (PLEG): container finished" podID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerID="45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506" exitCode=0 Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.670731 4682 generic.go:334] "Generic (PLEG): container finished" podID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerID="8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33" exitCode=143 Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.670740 4682 generic.go:334] "Generic (PLEG): container finished" podID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerID="ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3" exitCode=143 Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.670798 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerDied","Data":"fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290"} Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 
10:57:33.670832 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerDied","Data":"fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487"} Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.670844 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerDied","Data":"45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506"} Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.670856 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerDied","Data":"8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33"} Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.670868 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerDied","Data":"ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3"} Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.670886 4682 scope.go:117] "RemoveContainer" containerID="6bd9d948edf86eb37a587e96fc486bcf3f6339afbc8db0861428dc2f43f5a3c9" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.672933 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zs6ss_a005c959-3805-4e15-aa3a-7093815e03b8/kube-multus/2.log" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.673393 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zs6ss_a005c959-3805-4e15-aa3a-7093815e03b8/kube-multus/1.log" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.673451 4682 generic.go:334] "Generic (PLEG): container finished" podID="a005c959-3805-4e15-aa3a-7093815e03b8" containerID="c06143a1c59cfb88d374e761a7c11462e0b30d2649b518183753eba214aa6465" exitCode=2 Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.673501 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zs6ss" event={"ID":"a005c959-3805-4e15-aa3a-7093815e03b8","Type":"ContainerDied","Data":"c06143a1c59cfb88d374e761a7c11462e0b30d2649b518183753eba214aa6465"} Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.674076 4682 scope.go:117] "RemoveContainer" containerID="c06143a1c59cfb88d374e761a7c11462e0b30d2649b518183753eba214aa6465" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.801938 4682 scope.go:117] "RemoveContainer" containerID="a7b979e9cc3b0e9077533cb434014c582b24756abb4f4b3a178ac7be985512fd" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.823765 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovn-acl-logging/0.log" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.824267 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovn-controller/0.log" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.824747 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.888753 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-twn8s"] Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.888994 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e373320b-0c25-4165-b27a-ff5b889dd9a9" containerName="extract" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889011 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="e373320b-0c25-4165-b27a-ff5b889dd9a9" containerName="extract" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.889020 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889027 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.889035 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="kubecfg-setup" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889041 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="kubecfg-setup" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.889048 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889056 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.889066 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="sbdb" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889072 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="sbdb" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.889080 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="nbdb" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889087 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="nbdb" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.889099 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889105 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.889113 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="kube-rbac-proxy-node" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889119 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="kube-rbac-proxy-node" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.889128 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" 
containerName="ovn-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889134 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovn-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.889141 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e373320b-0c25-4165-b27a-ff5b889dd9a9" containerName="util" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889147 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="e373320b-0c25-4165-b27a-ff5b889dd9a9" containerName="util" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.889154 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="northd" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889161 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="northd" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.889170 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovn-acl-logging" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889175 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovn-acl-logging" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.889182 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="kube-rbac-proxy-ovn-metrics" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889187 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="kube-rbac-proxy-ovn-metrics" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.889195 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e373320b-0c25-4165-b27a-ff5b889dd9a9" containerName="pull" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889201 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="e373320b-0c25-4165-b27a-ff5b889dd9a9" containerName="pull" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889284 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="kube-rbac-proxy-node" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889295 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889302 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovn-acl-logging" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889309 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889316 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="kube-rbac-proxy-ovn-metrics" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889323 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889330 4682 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="e373320b-0c25-4165-b27a-ff5b889dd9a9" containerName="extract" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889340 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="sbdb" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889349 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889357 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="nbdb" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889367 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="northd" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889374 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovn-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.889460 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.889483 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.891292 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: E1210 10:57:33.891407 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.891422 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerName="ovnkube-controller" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.892947 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.932788 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-ovn\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.932870 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovnkube-script-lib\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.932901 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-log-socket\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.932898 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.932951 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-log-socket" (OuterVolumeSpecName: "log-socket") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933004 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-var-lib-openvswitch\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933040 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933393 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933420 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovnkube-config\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933444 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-run-ovn-kubernetes\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933485 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-etc-openvswitch\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933511 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-systemd\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933526 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-cni-bin\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933551 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovn-node-metrics-cert\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933575 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hk8dd\" (UniqueName: \"kubernetes.io/projected/0d4402e6-a6f6-4970-8392-9f1856b52eb4-kube-api-access-hk8dd\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933607 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-run-netns\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933628 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-cni-netd\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933660 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-env-overrides\") pod 
\"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933680 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-var-lib-cni-networks-ovn-kubernetes\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933704 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-node-log\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933723 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-openvswitch\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933740 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-slash\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933763 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-kubelet\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933784 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-systemd-units\") pod \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\" (UID: \"0d4402e6-a6f6-4970-8392-9f1856b52eb4\") " Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933860 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-run-openvswitch\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933888 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-cni-bin\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933921 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-etc-openvswitch\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933940 4682 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3c8fd450-01c2-47e2-8dfa-018323dc5eab-ovnkube-config\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933961 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-cni-netd\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933978 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrvqp\" (UniqueName: \"kubernetes.io/projected/3c8fd450-01c2-47e2-8dfa-018323dc5eab-kube-api-access-hrvqp\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.933999 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-run-ovn\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934024 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-slash\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934046 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3c8fd450-01c2-47e2-8dfa-018323dc5eab-ovnkube-script-lib\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934066 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-run-netns\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934089 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934120 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-run-systemd\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934139 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-var-lib-openvswitch\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934159 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-log-socket\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934183 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-run-ovn-kubernetes\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934207 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-node-log\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934228 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3c8fd450-01c2-47e2-8dfa-018323dc5eab-env-overrides\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934255 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-systemd-units\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934326 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3c8fd450-01c2-47e2-8dfa-018323dc5eab-ovn-node-metrics-cert\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934355 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-kubelet\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934394 4682 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:33 
crc kubenswrapper[4682]: I1210 10:57:33.934408 4682 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934421 4682 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-log-socket\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934488 4682 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934705 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934764 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934802 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934946 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.934978 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.935230 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-node-log" (OuterVolumeSpecName: "node-log") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "node-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.935264 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.935288 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-slash" (OuterVolumeSpecName: "host-slash") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.935311 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.935652 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.935656 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.935692 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.936114 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.940189 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d4402e6-a6f6-4970-8392-9f1856b52eb4-kube-api-access-hk8dd" (OuterVolumeSpecName: "kube-api-access-hk8dd") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "kube-api-access-hk8dd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.940236 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 10:57:33 crc kubenswrapper[4682]: I1210 10:57:33.949278 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "0d4402e6-a6f6-4970-8392-9f1856b52eb4" (UID: "0d4402e6-a6f6-4970-8392-9f1856b52eb4"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035378 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-run-systemd\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035443 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-var-lib-openvswitch\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035485 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-log-socket\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035508 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-run-ovn-kubernetes\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035510 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-run-systemd\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035533 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-node-log\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035554 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3c8fd450-01c2-47e2-8dfa-018323dc5eab-env-overrides\") pod \"ovnkube-node-twn8s\" (UID: 
\"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035577 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-systemd-units\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035587 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-log-socket\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035639 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-systemd-units\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035663 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3c8fd450-01c2-47e2-8dfa-018323dc5eab-ovn-node-metrics-cert\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035685 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-run-ovn-kubernetes\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035704 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-kubelet\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035719 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-node-log\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035739 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-run-openvswitch\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035770 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-cni-bin\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc 
kubenswrapper[4682]: I1210 10:57:34.035814 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3c8fd450-01c2-47e2-8dfa-018323dc5eab-ovnkube-config\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035840 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-etc-openvswitch\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035863 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-cni-netd\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035892 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrvqp\" (UniqueName: \"kubernetes.io/projected/3c8fd450-01c2-47e2-8dfa-018323dc5eab-kube-api-access-hrvqp\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035927 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-run-ovn\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.035965 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-slash\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036000 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3c8fd450-01c2-47e2-8dfa-018323dc5eab-ovnkube-script-lib\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036020 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-run-netns\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036055 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036148 
4682 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036166 4682 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036182 4682 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036194 4682 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-systemd\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036207 4682 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-cni-bin\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036219 4682 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0d4402e6-a6f6-4970-8392-9f1856b52eb4-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036230 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hk8dd\" (UniqueName: \"kubernetes.io/projected/0d4402e6-a6f6-4970-8392-9f1856b52eb4-kube-api-access-hk8dd\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036241 4682 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-run-netns\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036253 4682 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-cni-netd\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036263 4682 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0d4402e6-a6f6-4970-8392-9f1856b52eb4-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036266 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3c8fd450-01c2-47e2-8dfa-018323dc5eab-env-overrides\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036275 4682 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036292 4682 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: 
\"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-node-log\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036305 4682 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-run-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036317 4682 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-slash\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036327 4682 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-host-kubelet\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036339 4682 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0d4402e6-a6f6-4970-8392-9f1856b52eb4-systemd-units\") on node \"crc\" DevicePath \"\"" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036376 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036411 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-etc-openvswitch\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036442 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-cni-netd\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036748 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3c8fd450-01c2-47e2-8dfa-018323dc5eab-ovnkube-config\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.036798 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-var-lib-openvswitch\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.037076 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-slash\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.037129 4682 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-run-netns\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.037148 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-cni-bin\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.037149 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-run-ovn\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.037190 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-host-kubelet\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.037150 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3c8fd450-01c2-47e2-8dfa-018323dc5eab-run-openvswitch\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.037721 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3c8fd450-01c2-47e2-8dfa-018323dc5eab-ovnkube-script-lib\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.042881 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3c8fd450-01c2-47e2-8dfa-018323dc5eab-ovn-node-metrics-cert\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.057715 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrvqp\" (UniqueName: \"kubernetes.io/projected/3c8fd450-01c2-47e2-8dfa-018323dc5eab-kube-api-access-hrvqp\") pod \"ovnkube-node-twn8s\" (UID: \"3c8fd450-01c2-47e2-8dfa-018323dc5eab\") " pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.208581 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:34 crc kubenswrapper[4682]: W1210 10:57:34.227867 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3c8fd450_01c2_47e2_8dfa_018323dc5eab.slice/crio-5b653593caf7d544c0f0a824227177c1abb07c9196efd575c64265701fffff08 WatchSource:0}: Error finding container 5b653593caf7d544c0f0a824227177c1abb07c9196efd575c64265701fffff08: Status 404 returned error can't find the container with id 5b653593caf7d544c0f0a824227177c1abb07c9196efd575c64265701fffff08 Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.680831 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovn-acl-logging/0.log" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.682707 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vmhkf_0d4402e6-a6f6-4970-8392-9f1856b52eb4/ovn-controller/0.log" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.683045 4682 generic.go:334] "Generic (PLEG): container finished" podID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerID="115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5" exitCode=0 Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.683069 4682 generic.go:334] "Generic (PLEG): container finished" podID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerID="a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca" exitCode=0 Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.683077 4682 generic.go:334] "Generic (PLEG): container finished" podID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" containerID="bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50" exitCode=0 Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.683119 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerDied","Data":"115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5"} Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.683139 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerDied","Data":"a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca"} Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.683152 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerDied","Data":"bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50"} Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.683163 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" event={"ID":"0d4402e6-a6f6-4970-8392-9f1856b52eb4","Type":"ContainerDied","Data":"77f383ee433230c9780e7ed96b643708b6843c7a007b8329f8cb05d0f0af997f"} Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.683181 4682 scope.go:117] "RemoveContainer" containerID="fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.683324 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-vmhkf" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.685893 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zs6ss_a005c959-3805-4e15-aa3a-7093815e03b8/kube-multus/2.log" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.685989 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zs6ss" event={"ID":"a005c959-3805-4e15-aa3a-7093815e03b8","Type":"ContainerStarted","Data":"feeaf056795101a50f36236b6de65327942af528d6b59a590bf0058d51cd35d7"} Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.690577 4682 generic.go:334] "Generic (PLEG): container finished" podID="3c8fd450-01c2-47e2-8dfa-018323dc5eab" containerID="0f5acd6cb26f012996d698e3c773638bc39fd1dc105e70e61174355d61351016" exitCode=0 Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.690647 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" event={"ID":"3c8fd450-01c2-47e2-8dfa-018323dc5eab","Type":"ContainerDied","Data":"0f5acd6cb26f012996d698e3c773638bc39fd1dc105e70e61174355d61351016"} Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.690678 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" event={"ID":"3c8fd450-01c2-47e2-8dfa-018323dc5eab","Type":"ContainerStarted","Data":"5b653593caf7d544c0f0a824227177c1abb07c9196efd575c64265701fffff08"} Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.711849 4682 scope.go:117] "RemoveContainer" containerID="115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.728252 4682 scope.go:117] "RemoveContainer" containerID="a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.743636 4682 scope.go:117] "RemoveContainer" containerID="bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.752620 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-vmhkf"] Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.772038 4682 scope.go:117] "RemoveContainer" containerID="fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.773009 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-vmhkf"] Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.790445 4682 scope.go:117] "RemoveContainer" containerID="45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.814328 4682 scope.go:117] "RemoveContainer" containerID="8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.832979 4682 scope.go:117] "RemoveContainer" containerID="ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.852787 4682 scope.go:117] "RemoveContainer" containerID="e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.885374 4682 scope.go:117] "RemoveContainer" containerID="fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290" Dec 10 10:57:34 crc kubenswrapper[4682]: E1210 10:57:34.886886 4682 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290\": container with ID starting with fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290 not found: ID does not exist" containerID="fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.886934 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290"} err="failed to get container status \"fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290\": rpc error: code = NotFound desc = could not find container \"fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290\": container with ID starting with fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.886973 4682 scope.go:117] "RemoveContainer" containerID="115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5" Dec 10 10:57:34 crc kubenswrapper[4682]: E1210 10:57:34.890881 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\": container with ID starting with 115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5 not found: ID does not exist" containerID="115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.890932 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5"} err="failed to get container status \"115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\": rpc error: code = NotFound desc = could not find container \"115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\": container with ID starting with 115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.890965 4682 scope.go:117] "RemoveContainer" containerID="a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca" Dec 10 10:57:34 crc kubenswrapper[4682]: E1210 10:57:34.894866 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\": container with ID starting with a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca not found: ID does not exist" containerID="a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.894909 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca"} err="failed to get container status \"a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\": rpc error: code = NotFound desc = could not find container \"a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\": container with ID starting with a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.894940 4682 scope.go:117] "RemoveContainer" 
containerID="bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50" Dec 10 10:57:34 crc kubenswrapper[4682]: E1210 10:57:34.895235 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\": container with ID starting with bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50 not found: ID does not exist" containerID="bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.895260 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50"} err="failed to get container status \"bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\": rpc error: code = NotFound desc = could not find container \"bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\": container with ID starting with bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.895276 4682 scope.go:117] "RemoveContainer" containerID="fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487" Dec 10 10:57:34 crc kubenswrapper[4682]: E1210 10:57:34.895554 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\": container with ID starting with fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487 not found: ID does not exist" containerID="fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.895584 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487"} err="failed to get container status \"fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\": rpc error: code = NotFound desc = could not find container \"fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\": container with ID starting with fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.895601 4682 scope.go:117] "RemoveContainer" containerID="45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506" Dec 10 10:57:34 crc kubenswrapper[4682]: E1210 10:57:34.895853 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\": container with ID starting with 45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506 not found: ID does not exist" containerID="45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.895878 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506"} err="failed to get container status \"45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\": rpc error: code = NotFound desc = could not find container \"45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\": container with ID starting with 
45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.895894 4682 scope.go:117] "RemoveContainer" containerID="8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33" Dec 10 10:57:34 crc kubenswrapper[4682]: E1210 10:57:34.896092 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\": container with ID starting with 8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33 not found: ID does not exist" containerID="8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.896117 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33"} err="failed to get container status \"8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\": rpc error: code = NotFound desc = could not find container \"8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\": container with ID starting with 8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.896133 4682 scope.go:117] "RemoveContainer" containerID="ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3" Dec 10 10:57:34 crc kubenswrapper[4682]: E1210 10:57:34.896422 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\": container with ID starting with ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3 not found: ID does not exist" containerID="ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.896446 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3"} err="failed to get container status \"ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\": rpc error: code = NotFound desc = could not find container \"ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\": container with ID starting with ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.896482 4682 scope.go:117] "RemoveContainer" containerID="e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5" Dec 10 10:57:34 crc kubenswrapper[4682]: E1210 10:57:34.896811 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\": container with ID starting with e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5 not found: ID does not exist" containerID="e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.896835 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5"} err="failed to get container status \"e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\": rpc 
error: code = NotFound desc = could not find container \"e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\": container with ID starting with e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.896851 4682 scope.go:117] "RemoveContainer" containerID="fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.897131 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290"} err="failed to get container status \"fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290\": rpc error: code = NotFound desc = could not find container \"fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290\": container with ID starting with fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.897154 4682 scope.go:117] "RemoveContainer" containerID="115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.897399 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5"} err="failed to get container status \"115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\": rpc error: code = NotFound desc = could not find container \"115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\": container with ID starting with 115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.897431 4682 scope.go:117] "RemoveContainer" containerID="a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.897708 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca"} err="failed to get container status \"a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\": rpc error: code = NotFound desc = could not find container \"a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\": container with ID starting with a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.897732 4682 scope.go:117] "RemoveContainer" containerID="bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.897906 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50"} err="failed to get container status \"bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\": rpc error: code = NotFound desc = could not find container \"bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\": container with ID starting with bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.897930 4682 scope.go:117] "RemoveContainer" containerID="fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487" Dec 10 10:57:34 crc 
kubenswrapper[4682]: I1210 10:57:34.898141 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487"} err="failed to get container status \"fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\": rpc error: code = NotFound desc = could not find container \"fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\": container with ID starting with fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.898164 4682 scope.go:117] "RemoveContainer" containerID="45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.898412 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506"} err="failed to get container status \"45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\": rpc error: code = NotFound desc = could not find container \"45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\": container with ID starting with 45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.898438 4682 scope.go:117] "RemoveContainer" containerID="8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.898743 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33"} err="failed to get container status \"8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\": rpc error: code = NotFound desc = could not find container \"8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\": container with ID starting with 8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.898765 4682 scope.go:117] "RemoveContainer" containerID="ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.898961 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3"} err="failed to get container status \"ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\": rpc error: code = NotFound desc = could not find container \"ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\": container with ID starting with ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.898983 4682 scope.go:117] "RemoveContainer" containerID="e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.899346 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5"} err="failed to get container status \"e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\": rpc error: code = NotFound desc = could not find container \"e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\": container with ID 
starting with e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.899369 4682 scope.go:117] "RemoveContainer" containerID="fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.900070 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290"} err="failed to get container status \"fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290\": rpc error: code = NotFound desc = could not find container \"fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290\": container with ID starting with fe9f517a2789ed9c349d8a03e98132eb1ac32f66d2e8e0dfa0ae98d6f4d75290 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.900092 4682 scope.go:117] "RemoveContainer" containerID="115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.900677 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5"} err="failed to get container status \"115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\": rpc error: code = NotFound desc = could not find container \"115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5\": container with ID starting with 115618e364fa99df284f239502d2da778de4cd005d7ca30e2dd4f949974d78e5 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.900702 4682 scope.go:117] "RemoveContainer" containerID="a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.902857 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca"} err="failed to get container status \"a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\": rpc error: code = NotFound desc = could not find container \"a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca\": container with ID starting with a0b1b02cf9b19c3ce91b8a51318dd07a5949d98eaf2b042a78dd4c3bb4e083ca not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.902881 4682 scope.go:117] "RemoveContainer" containerID="bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.903178 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50"} err="failed to get container status \"bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\": rpc error: code = NotFound desc = could not find container \"bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50\": container with ID starting with bd7466a9f4aec7ce534edd694e162c89119b31af82fb6dd82f3d940a891e3f50 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.903201 4682 scope.go:117] "RemoveContainer" containerID="fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.906860 4682 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487"} err="failed to get container status \"fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\": rpc error: code = NotFound desc = could not find container \"fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487\": container with ID starting with fd9d755d926ef68d258c179c4938e87d385769d4614077df183e2269a3ea0487 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.906900 4682 scope.go:117] "RemoveContainer" containerID="45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.913832 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506"} err="failed to get container status \"45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\": rpc error: code = NotFound desc = could not find container \"45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506\": container with ID starting with 45057db4858947ae3c4ab68202a3dfcb03ee8d9ef8911cb87f972359d3594506 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.913873 4682 scope.go:117] "RemoveContainer" containerID="8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.916571 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33"} err="failed to get container status \"8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\": rpc error: code = NotFound desc = could not find container \"8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33\": container with ID starting with 8358243b4f4b691dd5b474c524fd9b02e9ef4399eb12f86a3b16994fc34e8a33 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.916619 4682 scope.go:117] "RemoveContainer" containerID="ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.921921 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3"} err="failed to get container status \"ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\": rpc error: code = NotFound desc = could not find container \"ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3\": container with ID starting with ab2ca0cd192fad9a6fe8ca45642e47f9add6a6b15993c07de83908c954ce82a3 not found: ID does not exist" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.921967 4682 scope.go:117] "RemoveContainer" containerID="e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5" Dec 10 10:57:34 crc kubenswrapper[4682]: I1210 10:57:34.922330 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5"} err="failed to get container status \"e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\": rpc error: code = NotFound desc = could not find container \"e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5\": container with ID starting with e6f882f17694cc3e684e608cee0f8ca0d1a4277427ca66b1b0cfec7209ffd0a5 not found: ID does not exist" Dec 
10 10:57:35 crc kubenswrapper[4682]: I1210 10:57:35.700847 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" event={"ID":"3c8fd450-01c2-47e2-8dfa-018323dc5eab","Type":"ContainerStarted","Data":"28cac73d6c95e849eb4c77bb939da6d7c923c72d96c0cc767587a1634f4d1777"} Dec 10 10:57:35 crc kubenswrapper[4682]: I1210 10:57:35.701190 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" event={"ID":"3c8fd450-01c2-47e2-8dfa-018323dc5eab","Type":"ContainerStarted","Data":"83a6123b0f359861ce4c6769dddbc4af12bc1debd474670481610b178b3d7973"} Dec 10 10:57:35 crc kubenswrapper[4682]: I1210 10:57:35.701212 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" event={"ID":"3c8fd450-01c2-47e2-8dfa-018323dc5eab","Type":"ContainerStarted","Data":"2cb84002d5e6e676847ff9da709e1bf6670210b1d162bc2b91fd04b4b8d91e42"} Dec 10 10:57:35 crc kubenswrapper[4682]: I1210 10:57:35.701222 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" event={"ID":"3c8fd450-01c2-47e2-8dfa-018323dc5eab","Type":"ContainerStarted","Data":"9b3536cb23313835f58242e57306718646021d4752d95d3a6c23bfadbe278a50"} Dec 10 10:57:35 crc kubenswrapper[4682]: I1210 10:57:35.701234 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" event={"ID":"3c8fd450-01c2-47e2-8dfa-018323dc5eab","Type":"ContainerStarted","Data":"d56e189a5a69b545ad6838795b349b659b5c1cc068314e2c45ccd603de083667"} Dec 10 10:57:35 crc kubenswrapper[4682]: I1210 10:57:35.701244 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" event={"ID":"3c8fd450-01c2-47e2-8dfa-018323dc5eab","Type":"ContainerStarted","Data":"e20a9ba9ac68560c03417da212f81931b94a053c1cc67d9407e40f2a9a6b9d6f"} Dec 10 10:57:36 crc kubenswrapper[4682]: I1210 10:57:36.388350 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d4402e6-a6f6-4970-8392-9f1856b52eb4" path="/var/lib/kubelet/pods/0d4402e6-a6f6-4970-8392-9f1856b52eb4/volumes" Dec 10 10:57:36 crc kubenswrapper[4682]: I1210 10:57:36.478721 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:57:36 crc kubenswrapper[4682]: I1210 10:57:36.478772 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:57:37 crc kubenswrapper[4682]: I1210 10:57:37.715425 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" event={"ID":"3c8fd450-01c2-47e2-8dfa-018323dc5eab","Type":"ContainerStarted","Data":"b7053497eec28aef4ae44f96c29f84b58dcba33a0c92206c3217b27646b86cca"} Dec 10 10:57:40 crc kubenswrapper[4682]: I1210 10:57:40.731371 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" event={"ID":"3c8fd450-01c2-47e2-8dfa-018323dc5eab","Type":"ContainerStarted","Data":"958867b3d6fb2d027a9ca5d4d89fde668910d690d8735bb3817a891078040f44"} 
Dec 10 10:57:40 crc kubenswrapper[4682]: I1210 10:57:40.731948 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:40 crc kubenswrapper[4682]: I1210 10:57:40.731962 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:40 crc kubenswrapper[4682]: I1210 10:57:40.745576 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g"] Dec 10 10:57:40 crc kubenswrapper[4682]: I1210 10:57:40.746172 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" Dec 10 10:57:40 crc kubenswrapper[4682]: I1210 10:57:40.747841 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-bbknn" Dec 10 10:57:40 crc kubenswrapper[4682]: I1210 10:57:40.747895 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Dec 10 10:57:40 crc kubenswrapper[4682]: I1210 10:57:40.751521 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Dec 10 10:57:40 crc kubenswrapper[4682]: I1210 10:57:40.760928 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:40 crc kubenswrapper[4682]: I1210 10:57:40.782720 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" podStartSLOduration=7.782702917 podStartE2EDuration="7.782702917s" podCreationTimestamp="2025-12-10 10:57:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 10:57:40.779162005 +0000 UTC m=+741.099372765" watchObservedRunningTime="2025-12-10 10:57:40.782702917 +0000 UTC m=+741.102913667" Dec 10 10:57:40 crc kubenswrapper[4682]: I1210 10:57:40.826022 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lghwb\" (UniqueName: \"kubernetes.io/projected/c67b5f8a-c145-46aa-8074-32612df1d2a2-kube-api-access-lghwb\") pod \"obo-prometheus-operator-668cf9dfbb-jkr7g\" (UID: \"c67b5f8a-c145-46aa-8074-32612df1d2a2\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" Dec 10 10:57:40 crc kubenswrapper[4682]: I1210 10:57:40.927611 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lghwb\" (UniqueName: \"kubernetes.io/projected/c67b5f8a-c145-46aa-8074-32612df1d2a2-kube-api-access-lghwb\") pod \"obo-prometheus-operator-668cf9dfbb-jkr7g\" (UID: \"c67b5f8a-c145-46aa-8074-32612df1d2a2\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" Dec 10 10:57:40 crc kubenswrapper[4682]: I1210 10:57:40.961107 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lghwb\" (UniqueName: \"kubernetes.io/projected/c67b5f8a-c145-46aa-8074-32612df1d2a2-kube-api-access-lghwb\") pod \"obo-prometheus-operator-668cf9dfbb-jkr7g\" (UID: \"c67b5f8a-c145-46aa-8074-32612df1d2a2\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.062622 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" Dec 10 10:57:41 crc kubenswrapper[4682]: E1210 10:57:41.088182 4682 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jkr7g_openshift-operators_c67b5f8a-c145-46aa-8074-32612df1d2a2_0(0360ac97efcf0ca07619ea289f6c9979d5ff121c7704290fdf32e60842caf6cb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:57:41 crc kubenswrapper[4682]: E1210 10:57:41.088261 4682 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jkr7g_openshift-operators_c67b5f8a-c145-46aa-8074-32612df1d2a2_0(0360ac97efcf0ca07619ea289f6c9979d5ff121c7704290fdf32e60842caf6cb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" Dec 10 10:57:41 crc kubenswrapper[4682]: E1210 10:57:41.088287 4682 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jkr7g_openshift-operators_c67b5f8a-c145-46aa-8074-32612df1d2a2_0(0360ac97efcf0ca07619ea289f6c9979d5ff121c7704290fdf32e60842caf6cb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" Dec 10 10:57:41 crc kubenswrapper[4682]: E1210 10:57:41.088340 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-jkr7g_openshift-operators(c67b5f8a-c145-46aa-8074-32612df1d2a2)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-jkr7g_openshift-operators(c67b5f8a-c145-46aa-8074-32612df1d2a2)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jkr7g_openshift-operators_c67b5f8a-c145-46aa-8074-32612df1d2a2_0(0360ac97efcf0ca07619ea289f6c9979d5ff121c7704290fdf32e60842caf6cb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" podUID="c67b5f8a-c145-46aa-8074-32612df1d2a2" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.499384 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n"] Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.500046 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.502569 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.502932 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-lb6bd" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.510318 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t"] Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.511217 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.536324 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n\" (UID: \"7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.536413 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/26e76d66-6fe7-4796-b0e6-d767d3f12d22-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t\" (UID: \"26e76d66-6fe7-4796-b0e6-d767d3f12d22\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.536458 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n\" (UID: \"7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.536597 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/26e76d66-6fe7-4796-b0e6-d767d3f12d22-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t\" (UID: \"26e76d66-6fe7-4796-b0e6-d767d3f12d22\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.633161 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-pgsh6"] Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.633852 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.637261 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-dw65f" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.637281 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/26e76d66-6fe7-4796-b0e6-d767d3f12d22-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t\" (UID: \"26e76d66-6fe7-4796-b0e6-d767d3f12d22\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.637363 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n\" (UID: \"7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.637396 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/26e76d66-6fe7-4796-b0e6-d767d3f12d22-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t\" (UID: \"26e76d66-6fe7-4796-b0e6-d767d3f12d22\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.637422 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n\" (UID: \"7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.638774 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.642242 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n\" (UID: \"7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.646294 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/26e76d66-6fe7-4796-b0e6-d767d3f12d22-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t\" (UID: \"26e76d66-6fe7-4796-b0e6-d767d3f12d22\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.646911 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n\" (UID: \"7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa\") " 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.647376 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/26e76d66-6fe7-4796-b0e6-d767d3f12d22-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t\" (UID: \"26e76d66-6fe7-4796-b0e6-d767d3f12d22\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.737180 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.738099 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxmpf\" (UniqueName: \"kubernetes.io/projected/98eb9d3b-204c-4e4e-ac7e-484ac354bbca-kube-api-access-xxmpf\") pod \"observability-operator-d8bb48f5d-pgsh6\" (UID: \"98eb9d3b-204c-4e4e-ac7e-484ac354bbca\") " pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.738198 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/98eb9d3b-204c-4e4e-ac7e-484ac354bbca-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-pgsh6\" (UID: \"98eb9d3b-204c-4e4e-ac7e-484ac354bbca\") " pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.813791 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.824733 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.839490 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.839879 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/98eb9d3b-204c-4e4e-ac7e-484ac354bbca-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-pgsh6\" (UID: \"98eb9d3b-204c-4e4e-ac7e-484ac354bbca\") " pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.839948 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxmpf\" (UniqueName: \"kubernetes.io/projected/98eb9d3b-204c-4e4e-ac7e-484ac354bbca-kube-api-access-xxmpf\") pod \"observability-operator-d8bb48f5d-pgsh6\" (UID: \"98eb9d3b-204c-4e4e-ac7e-484ac354bbca\") " pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.846091 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/98eb9d3b-204c-4e4e-ac7e-484ac354bbca-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-pgsh6\" (UID: \"98eb9d3b-204c-4e4e-ac7e-484ac354bbca\") " pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:41 crc kubenswrapper[4682]: E1210 10:57:41.856101 4682 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n_openshift-operators_7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa_0(fcb8cf6ca91d5ce42e6ccc4f0d29b65befe9ceec9239e4b61d221cd535e6f400): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:57:41 crc kubenswrapper[4682]: E1210 10:57:41.856167 4682 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n_openshift-operators_7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa_0(fcb8cf6ca91d5ce42e6ccc4f0d29b65befe9ceec9239e4b61d221cd535e6f400): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:41 crc kubenswrapper[4682]: E1210 10:57:41.856191 4682 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n_openshift-operators_7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa_0(fcb8cf6ca91d5ce42e6ccc4f0d29b65befe9ceec9239e4b61d221cd535e6f400): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:41 crc kubenswrapper[4682]: E1210 10:57:41.856239 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n_openshift-operators(7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n_openshift-operators(7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n_openshift-operators_7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa_0(fcb8cf6ca91d5ce42e6ccc4f0d29b65befe9ceec9239e4b61d221cd535e6f400): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" podUID="7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.868381 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxmpf\" (UniqueName: \"kubernetes.io/projected/98eb9d3b-204c-4e4e-ac7e-484ac354bbca-kube-api-access-xxmpf\") pod \"observability-operator-d8bb48f5d-pgsh6\" (UID: \"98eb9d3b-204c-4e4e-ac7e-484ac354bbca\") " pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:41 crc kubenswrapper[4682]: E1210 10:57:41.870204 4682 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t_openshift-operators_26e76d66-6fe7-4796-b0e6-d767d3f12d22_0(8e6a4326f2ceff88dc11f598373495449644c056d880a831334a8acf2b74df54): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:57:41 crc kubenswrapper[4682]: E1210 10:57:41.870269 4682 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t_openshift-operators_26e76d66-6fe7-4796-b0e6-d767d3f12d22_0(8e6a4326f2ceff88dc11f598373495449644c056d880a831334a8acf2b74df54): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:41 crc kubenswrapper[4682]: E1210 10:57:41.870293 4682 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t_openshift-operators_26e76d66-6fe7-4796-b0e6-d767d3f12d22_0(8e6a4326f2ceff88dc11f598373495449644c056d880a831334a8acf2b74df54): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:41 crc kubenswrapper[4682]: E1210 10:57:41.870338 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t_openshift-operators(26e76d66-6fe7-4796-b0e6-d767d3f12d22)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t_openshift-operators(26e76d66-6fe7-4796-b0e6-d767d3f12d22)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t_openshift-operators_26e76d66-6fe7-4796-b0e6-d767d3f12d22_0(8e6a4326f2ceff88dc11f598373495449644c056d880a831334a8acf2b74df54): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" podUID="26e76d66-6fe7-4796-b0e6-d767d3f12d22" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.873764 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-d5rcq"] Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.884384 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.887645 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-dvw5p" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.941657 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/690f858b-11ca-4449-89ed-5f3fb287113e-openshift-service-ca\") pod \"perses-operator-5446b9c989-d5rcq\" (UID: \"690f858b-11ca-4449-89ed-5f3fb287113e\") " pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.941720 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtmrb\" (UniqueName: \"kubernetes.io/projected/690f858b-11ca-4449-89ed-5f3fb287113e-kube-api-access-dtmrb\") pod \"perses-operator-5446b9c989-d5rcq\" (UID: \"690f858b-11ca-4449-89ed-5f3fb287113e\") " pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:41 crc kubenswrapper[4682]: I1210 10:57:41.991869 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.013349 4682 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pgsh6_openshift-operators_98eb9d3b-204c-4e4e-ac7e-484ac354bbca_0(6fcabc06a3359f08546ebf6d17b700a3826c8836ed9e596c0de770117570e87c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.013444 4682 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pgsh6_openshift-operators_98eb9d3b-204c-4e4e-ac7e-484ac354bbca_0(6fcabc06a3359f08546ebf6d17b700a3826c8836ed9e596c0de770117570e87c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.013496 4682 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pgsh6_openshift-operators_98eb9d3b-204c-4e4e-ac7e-484ac354bbca_0(6fcabc06a3359f08546ebf6d17b700a3826c8836ed9e596c0de770117570e87c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.013542 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-pgsh6_openshift-operators(98eb9d3b-204c-4e4e-ac7e-484ac354bbca)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-pgsh6_openshift-operators(98eb9d3b-204c-4e4e-ac7e-484ac354bbca)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pgsh6_openshift-operators_98eb9d3b-204c-4e4e-ac7e-484ac354bbca_0(6fcabc06a3359f08546ebf6d17b700a3826c8836ed9e596c0de770117570e87c): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" podUID="98eb9d3b-204c-4e4e-ac7e-484ac354bbca" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.042684 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/690f858b-11ca-4449-89ed-5f3fb287113e-openshift-service-ca\") pod \"perses-operator-5446b9c989-d5rcq\" (UID: \"690f858b-11ca-4449-89ed-5f3fb287113e\") " pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.042753 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtmrb\" (UniqueName: \"kubernetes.io/projected/690f858b-11ca-4449-89ed-5f3fb287113e-kube-api-access-dtmrb\") pod \"perses-operator-5446b9c989-d5rcq\" (UID: \"690f858b-11ca-4449-89ed-5f3fb287113e\") " pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.043595 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/690f858b-11ca-4449-89ed-5f3fb287113e-openshift-service-ca\") pod \"perses-operator-5446b9c989-d5rcq\" (UID: \"690f858b-11ca-4449-89ed-5f3fb287113e\") " pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.061283 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtmrb\" (UniqueName: \"kubernetes.io/projected/690f858b-11ca-4449-89ed-5f3fb287113e-kube-api-access-dtmrb\") pod \"perses-operator-5446b9c989-d5rcq\" (UID: \"690f858b-11ca-4449-89ed-5f3fb287113e\") " pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.198105 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.242158 4682 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-d5rcq_openshift-operators_690f858b-11ca-4449-89ed-5f3fb287113e_0(ca63e46587f07ba3ea616189632ae9f1cb633c3f02a7346e109b9e1cfde1454a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.242239 4682 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-d5rcq_openshift-operators_690f858b-11ca-4449-89ed-5f3fb287113e_0(ca63e46587f07ba3ea616189632ae9f1cb633c3f02a7346e109b9e1cfde1454a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.242269 4682 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-d5rcq_openshift-operators_690f858b-11ca-4449-89ed-5f3fb287113e_0(ca63e46587f07ba3ea616189632ae9f1cb633c3f02a7346e109b9e1cfde1454a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.242315 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-d5rcq_openshift-operators(690f858b-11ca-4449-89ed-5f3fb287113e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-d5rcq_openshift-operators(690f858b-11ca-4449-89ed-5f3fb287113e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-d5rcq_openshift-operators_690f858b-11ca-4449-89ed-5f3fb287113e_0(ca63e46587f07ba3ea616189632ae9f1cb633c3f02a7346e109b9e1cfde1454a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" podUID="690f858b-11ca-4449-89ed-5f3fb287113e" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.515043 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n"] Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.518111 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-pgsh6"] Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.528867 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t"] Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.531724 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g"] Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.531821 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.532145 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.550773 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-d5rcq"] Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.557314 4682 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jkr7g_openshift-operators_c67b5f8a-c145-46aa-8074-32612df1d2a2_0(c77f957f4a5ecb96af6b83e663a02ccd16e17983489af34be86601e722593911): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.557389 4682 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jkr7g_openshift-operators_c67b5f8a-c145-46aa-8074-32612df1d2a2_0(c77f957f4a5ecb96af6b83e663a02ccd16e17983489af34be86601e722593911): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.557416 4682 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jkr7g_openshift-operators_c67b5f8a-c145-46aa-8074-32612df1d2a2_0(c77f957f4a5ecb96af6b83e663a02ccd16e17983489af34be86601e722593911): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.557483 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-jkr7g_openshift-operators(c67b5f8a-c145-46aa-8074-32612df1d2a2)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-jkr7g_openshift-operators(c67b5f8a-c145-46aa-8074-32612df1d2a2)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jkr7g_openshift-operators_c67b5f8a-c145-46aa-8074-32612df1d2a2_0(c77f957f4a5ecb96af6b83e663a02ccd16e17983489af34be86601e722593911): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" podUID="c67b5f8a-c145-46aa-8074-32612df1d2a2" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.741083 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.741295 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.741604 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.742390 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.742656 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.742887 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.743104 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:42 crc kubenswrapper[4682]: I1210 10:57:42.741605 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.776444 4682 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t_openshift-operators_26e76d66-6fe7-4796-b0e6-d767d3f12d22_0(8f28a08f3021a2e29f0bb718a2dcd287139bb079e6b5dfc8c7a602668d08bc24): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.776534 4682 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t_openshift-operators_26e76d66-6fe7-4796-b0e6-d767d3f12d22_0(8f28a08f3021a2e29f0bb718a2dcd287139bb079e6b5dfc8c7a602668d08bc24): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.776562 4682 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t_openshift-operators_26e76d66-6fe7-4796-b0e6-d767d3f12d22_0(8f28a08f3021a2e29f0bb718a2dcd287139bb079e6b5dfc8c7a602668d08bc24): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.776627 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t_openshift-operators(26e76d66-6fe7-4796-b0e6-d767d3f12d22)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t_openshift-operators(26e76d66-6fe7-4796-b0e6-d767d3f12d22)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t_openshift-operators_26e76d66-6fe7-4796-b0e6-d767d3f12d22_0(8f28a08f3021a2e29f0bb718a2dcd287139bb079e6b5dfc8c7a602668d08bc24): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" podUID="26e76d66-6fe7-4796-b0e6-d767d3f12d22" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.800659 4682 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-d5rcq_openshift-operators_690f858b-11ca-4449-89ed-5f3fb287113e_0(e5f191211f699120b4eb3f8506a1b97cf7a110a1009cdbdd3cc176adf6d813cd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.800732 4682 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-d5rcq_openshift-operators_690f858b-11ca-4449-89ed-5f3fb287113e_0(e5f191211f699120b4eb3f8506a1b97cf7a110a1009cdbdd3cc176adf6d813cd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.800759 4682 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-d5rcq_openshift-operators_690f858b-11ca-4449-89ed-5f3fb287113e_0(e5f191211f699120b4eb3f8506a1b97cf7a110a1009cdbdd3cc176adf6d813cd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.800808 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-d5rcq_openshift-operators(690f858b-11ca-4449-89ed-5f3fb287113e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-d5rcq_openshift-operators(690f858b-11ca-4449-89ed-5f3fb287113e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-d5rcq_openshift-operators_690f858b-11ca-4449-89ed-5f3fb287113e_0(e5f191211f699120b4eb3f8506a1b97cf7a110a1009cdbdd3cc176adf6d813cd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" podUID="690f858b-11ca-4449-89ed-5f3fb287113e" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.808744 4682 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pgsh6_openshift-operators_98eb9d3b-204c-4e4e-ac7e-484ac354bbca_0(ec17a80bb6f89502f3afec7894d78da67361c6e503d326bf2913128476fb73cd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.808812 4682 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pgsh6_openshift-operators_98eb9d3b-204c-4e4e-ac7e-484ac354bbca_0(ec17a80bb6f89502f3afec7894d78da67361c6e503d326bf2913128476fb73cd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.808839 4682 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pgsh6_openshift-operators_98eb9d3b-204c-4e4e-ac7e-484ac354bbca_0(ec17a80bb6f89502f3afec7894d78da67361c6e503d326bf2913128476fb73cd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.808908 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-pgsh6_openshift-operators(98eb9d3b-204c-4e4e-ac7e-484ac354bbca)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-pgsh6_openshift-operators(98eb9d3b-204c-4e4e-ac7e-484ac354bbca)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pgsh6_openshift-operators_98eb9d3b-204c-4e4e-ac7e-484ac354bbca_0(ec17a80bb6f89502f3afec7894d78da67361c6e503d326bf2913128476fb73cd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" podUID="98eb9d3b-204c-4e4e-ac7e-484ac354bbca" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.813057 4682 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n_openshift-operators_7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa_0(8ba89216766ddd8a0915e6f51c6015f8d5fa6f56939771fc65790f51e125b5c3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.813110 4682 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n_openshift-operators_7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa_0(8ba89216766ddd8a0915e6f51c6015f8d5fa6f56939771fc65790f51e125b5c3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.813136 4682 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n_openshift-operators_7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa_0(8ba89216766ddd8a0915e6f51c6015f8d5fa6f56939771fc65790f51e125b5c3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:42 crc kubenswrapper[4682]: E1210 10:57:42.813183 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n_openshift-operators(7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n_openshift-operators(7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n_openshift-operators_7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa_0(8ba89216766ddd8a0915e6f51c6015f8d5fa6f56939771fc65790f51e125b5c3): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" podUID="7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa" Dec 10 10:57:53 crc kubenswrapper[4682]: I1210 10:57:53.381668 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:53 crc kubenswrapper[4682]: I1210 10:57:53.382501 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:57:53 crc kubenswrapper[4682]: I1210 10:57:53.587297 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-d5rcq"] Dec 10 10:57:53 crc kubenswrapper[4682]: I1210 10:57:53.798068 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" event={"ID":"690f858b-11ca-4449-89ed-5f3fb287113e","Type":"ContainerStarted","Data":"1867cedbcde49b93799e2dfdaf484e668effd65efe8aa4f63ce04841691d966d"} Dec 10 10:57:54 crc kubenswrapper[4682]: I1210 10:57:54.380190 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:54 crc kubenswrapper[4682]: I1210 10:57:54.380914 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" Dec 10 10:57:54 crc kubenswrapper[4682]: I1210 10:57:54.764509 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n"] Dec 10 10:57:54 crc kubenswrapper[4682]: I1210 10:57:54.811713 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" event={"ID":"7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa","Type":"ContainerStarted","Data":"e1d4e9f344ff820e6277e5bebd519fb68210a75fb7b24820db12fea6fe245cd5"} Dec 10 10:57:55 crc kubenswrapper[4682]: I1210 10:57:55.380958 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:55 crc kubenswrapper[4682]: I1210 10:57:55.381216 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:57:55 crc kubenswrapper[4682]: I1210 10:57:55.381546 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:55 crc kubenswrapper[4682]: I1210 10:57:55.381737 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" Dec 10 10:57:55 crc kubenswrapper[4682]: I1210 10:57:55.381973 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" Dec 10 10:57:55 crc kubenswrapper[4682]: I1210 10:57:55.382174 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" Dec 10 10:57:55 crc kubenswrapper[4682]: I1210 10:57:55.844442 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g"] Dec 10 10:57:55 crc kubenswrapper[4682]: I1210 10:57:55.938056 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-pgsh6"] Dec 10 10:57:55 crc kubenswrapper[4682]: I1210 10:57:55.992884 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t"] Dec 10 10:57:56 crc kubenswrapper[4682]: W1210 10:57:56.010633 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26e76d66_6fe7_4796_b0e6_d767d3f12d22.slice/crio-2cb84fff2caf2185a5f00c24129af81335f0c880508b630abea6334cf4e2e978 WatchSource:0}: Error finding container 2cb84fff2caf2185a5f00c24129af81335f0c880508b630abea6334cf4e2e978: Status 404 returned error can't find the container with id 2cb84fff2caf2185a5f00c24129af81335f0c880508b630abea6334cf4e2e978 Dec 10 10:57:56 crc kubenswrapper[4682]: I1210 10:57:56.855695 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" event={"ID":"98eb9d3b-204c-4e4e-ac7e-484ac354bbca","Type":"ContainerStarted","Data":"e45583e9ed0519fe0d216efa5dd9ee7677771923312840912dfb9f6beba82989"} Dec 10 10:57:56 crc kubenswrapper[4682]: I1210 10:57:56.859579 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" event={"ID":"26e76d66-6fe7-4796-b0e6-d767d3f12d22","Type":"ContainerStarted","Data":"2cb84fff2caf2185a5f00c24129af81335f0c880508b630abea6334cf4e2e978"} Dec 10 10:57:56 crc kubenswrapper[4682]: I1210 10:57:56.862030 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" event={"ID":"c67b5f8a-c145-46aa-8074-32612df1d2a2","Type":"ContainerStarted","Data":"13c347e759e75928f3340f1d46bbb60928ffa3998b092e243956255fbc825508"} Dec 10 10:58:01 crc kubenswrapper[4682]: I1210 10:58:01.521764 4682 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 10 10:58:04 crc kubenswrapper[4682]: I1210 10:58:04.233120 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-twn8s" Dec 10 10:58:06 crc kubenswrapper[4682]: I1210 10:58:06.479820 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:58:06 crc kubenswrapper[4682]: I1210 10:58:06.480158 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:58:08 crc kubenswrapper[4682]: E1210 10:58:08.307767 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest 
list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:9aec4c328ec43e40481e06ca5808deead74b75c0aacb90e9e72966c3fa14f385" Dec 10 10:58:08 crc kubenswrapper[4682]: E1210 10:58:08.307958 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:perses-operator,Image:registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:9aec4c328ec43e40481e06ca5808deead74b75c0aacb90e9e72966c3fa14f385,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openshift-service-ca,ReadOnly:true,MountPath:/ca,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dtmrb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000350000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod perses-operator-5446b9c989-d5rcq_openshift-operators(690f858b-11ca-4449-89ed-5f3fb287113e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 10:58:08 crc kubenswrapper[4682]: E1210 10:58:08.309136 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"perses-operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" podUID="690f858b-11ca-4449-89ed-5f3fb287113e" Dec 10 10:58:08 crc kubenswrapper[4682]: I1210 10:58:08.934277 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" event={"ID":"98eb9d3b-204c-4e4e-ac7e-484ac354bbca","Type":"ContainerStarted","Data":"1f2fa817fa4a149fd90f8def42bc889ccd86c937ba9619aa8022368b3b80b35e"} Dec 10 10:58:08 crc kubenswrapper[4682]: I1210 10:58:08.934661 4682 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:58:08 crc kubenswrapper[4682]: I1210 10:58:08.936584 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" event={"ID":"26e76d66-6fe7-4796-b0e6-d767d3f12d22","Type":"ContainerStarted","Data":"f924cee49b660fed0b8e27dd8babb305e8c155ca6120f04ce33aa010556d3e76"} Dec 10 10:58:08 crc kubenswrapper[4682]: I1210 10:58:08.940048 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" event={"ID":"7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa","Type":"ContainerStarted","Data":"d40255bc3a8e368413a4f32bcd1ce73e70ade5e13e7ddea6cd6f5007dd7d833d"} Dec 10 10:58:08 crc kubenswrapper[4682]: I1210 10:58:08.943004 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" event={"ID":"c67b5f8a-c145-46aa-8074-32612df1d2a2","Type":"ContainerStarted","Data":"2b6d7d281f73cb730b09d9936d13a4c12f45f5362e6e01948b48f8c6424c3806"} Dec 10 10:58:08 crc kubenswrapper[4682]: E1210 10:58:08.943954 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"perses-operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/perses-rhel9-operator@sha256:9aec4c328ec43e40481e06ca5808deead74b75c0aacb90e9e72966c3fa14f385\\\"\"" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" podUID="690f858b-11ca-4449-89ed-5f3fb287113e" Dec 10 10:58:08 crc kubenswrapper[4682]: I1210 10:58:08.956924 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" podStartSLOduration=15.544980337 podStartE2EDuration="27.956904869s" podCreationTimestamp="2025-12-10 10:57:41 +0000 UTC" firstStartedPulling="2025-12-10 10:57:55.966935636 +0000 UTC m=+756.287146396" lastFinishedPulling="2025-12-10 10:58:08.378860178 +0000 UTC m=+768.699070928" observedRunningTime="2025-12-10 10:58:08.956802266 +0000 UTC m=+769.277013056" watchObservedRunningTime="2025-12-10 10:58:08.956904869 +0000 UTC m=+769.277115609" Dec 10 10:58:08 crc kubenswrapper[4682]: I1210 10:58:08.979429 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n" podStartSLOduration=14.415268191 podStartE2EDuration="27.979415805s" podCreationTimestamp="2025-12-10 10:57:41 +0000 UTC" firstStartedPulling="2025-12-10 10:57:54.775519425 +0000 UTC m=+755.095730165" lastFinishedPulling="2025-12-10 10:58:08.339667029 +0000 UTC m=+768.659877779" observedRunningTime="2025-12-10 10:58:08.97796075 +0000 UTC m=+769.298171510" watchObservedRunningTime="2025-12-10 10:58:08.979415805 +0000 UTC m=+769.299626545" Dec 10 10:58:08 crc kubenswrapper[4682]: I1210 10:58:08.989761 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-pgsh6" Dec 10 10:58:09 crc kubenswrapper[4682]: I1210 10:58:09.006591 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t" podStartSLOduration=15.661459532 podStartE2EDuration="28.006573898s" podCreationTimestamp="2025-12-10 10:57:41 +0000 UTC" firstStartedPulling="2025-12-10 
10:57:56.013459956 +0000 UTC m=+756.333670706" lastFinishedPulling="2025-12-10 10:58:08.358574322 +0000 UTC m=+768.678785072" observedRunningTime="2025-12-10 10:58:09.005150333 +0000 UTC m=+769.325361083" watchObservedRunningTime="2025-12-10 10:58:09.006573898 +0000 UTC m=+769.326784648" Dec 10 10:58:09 crc kubenswrapper[4682]: I1210 10:58:09.026699 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jkr7g" podStartSLOduration=16.584695694 podStartE2EDuration="29.026682928s" podCreationTimestamp="2025-12-10 10:57:40 +0000 UTC" firstStartedPulling="2025-12-10 10:57:55.913111258 +0000 UTC m=+756.233322008" lastFinishedPulling="2025-12-10 10:58:08.355098482 +0000 UTC m=+768.675309242" observedRunningTime="2025-12-10 10:58:09.025414959 +0000 UTC m=+769.345625709" watchObservedRunningTime="2025-12-10 10:58:09.026682928 +0000 UTC m=+769.346893678" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.249205 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mqkpw"] Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.250414 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-mqkpw" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.256483 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.256746 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.259504 4682 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-x79gh" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.268140 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-zsncd"] Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.269564 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-zsncd" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.271491 4682 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-wbczl" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.274969 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mqkpw"] Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.281294 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-q7f8s"] Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.282042 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-q7f8s" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.286748 4682 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-7shb6" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.291059 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-zsncd"] Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.308144 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkv64\" (UniqueName: \"kubernetes.io/projected/2ad8f556-5e94-447b-9ec3-cd5c29885e2a-kube-api-access-qkv64\") pod \"cert-manager-cainjector-7f985d654d-mqkpw\" (UID: \"2ad8f556-5e94-447b-9ec3-cd5c29885e2a\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-mqkpw" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.316857 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-q7f8s"] Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.409028 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkv64\" (UniqueName: \"kubernetes.io/projected/2ad8f556-5e94-447b-9ec3-cd5c29885e2a-kube-api-access-qkv64\") pod \"cert-manager-cainjector-7f985d654d-mqkpw\" (UID: \"2ad8f556-5e94-447b-9ec3-cd5c29885e2a\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-mqkpw" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.409152 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qw8bs\" (UniqueName: \"kubernetes.io/projected/34fe5718-bdcd-4e01-8d46-5033469ecee0-kube-api-access-qw8bs\") pod \"cert-manager-5b446d88c5-zsncd\" (UID: \"34fe5718-bdcd-4e01-8d46-5033469ecee0\") " pod="cert-manager/cert-manager-5b446d88c5-zsncd" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.409230 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-995xv\" (UniqueName: \"kubernetes.io/projected/e8f49724-e500-4735-9eaa-f28ab2fe7d34-kube-api-access-995xv\") pod \"cert-manager-webhook-5655c58dd6-q7f8s\" (UID: \"e8f49724-e500-4735-9eaa-f28ab2fe7d34\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-q7f8s" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.434398 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkv64\" (UniqueName: \"kubernetes.io/projected/2ad8f556-5e94-447b-9ec3-cd5c29885e2a-kube-api-access-qkv64\") pod \"cert-manager-cainjector-7f985d654d-mqkpw\" (UID: \"2ad8f556-5e94-447b-9ec3-cd5c29885e2a\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-mqkpw" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.510134 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qw8bs\" (UniqueName: \"kubernetes.io/projected/34fe5718-bdcd-4e01-8d46-5033469ecee0-kube-api-access-qw8bs\") pod \"cert-manager-5b446d88c5-zsncd\" (UID: \"34fe5718-bdcd-4e01-8d46-5033469ecee0\") " pod="cert-manager/cert-manager-5b446d88c5-zsncd" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.510489 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-995xv\" (UniqueName: \"kubernetes.io/projected/e8f49724-e500-4735-9eaa-f28ab2fe7d34-kube-api-access-995xv\") pod \"cert-manager-webhook-5655c58dd6-q7f8s\" (UID: \"e8f49724-e500-4735-9eaa-f28ab2fe7d34\") 
" pod="cert-manager/cert-manager-webhook-5655c58dd6-q7f8s" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.526851 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-995xv\" (UniqueName: \"kubernetes.io/projected/e8f49724-e500-4735-9eaa-f28ab2fe7d34-kube-api-access-995xv\") pod \"cert-manager-webhook-5655c58dd6-q7f8s\" (UID: \"e8f49724-e500-4735-9eaa-f28ab2fe7d34\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-q7f8s" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.529042 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qw8bs\" (UniqueName: \"kubernetes.io/projected/34fe5718-bdcd-4e01-8d46-5033469ecee0-kube-api-access-qw8bs\") pod \"cert-manager-5b446d88c5-zsncd\" (UID: \"34fe5718-bdcd-4e01-8d46-5033469ecee0\") " pod="cert-manager/cert-manager-5b446d88c5-zsncd" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.568685 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-mqkpw" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.588914 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-zsncd" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.597067 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-q7f8s" Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.806296 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-zsncd"] Dec 10 10:58:16 crc kubenswrapper[4682]: W1210 10:58:16.813054 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34fe5718_bdcd_4e01_8d46_5033469ecee0.slice/crio-c635d6a7fe615724f2205a3f07cd8f7e1062869c01a2361eb8cb44a760c4d4f3 WatchSource:0}: Error finding container c635d6a7fe615724f2205a3f07cd8f7e1062869c01a2361eb8cb44a760c4d4f3: Status 404 returned error can't find the container with id c635d6a7fe615724f2205a3f07cd8f7e1062869c01a2361eb8cb44a760c4d4f3 Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.843557 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-q7f8s"] Dec 10 10:58:16 crc kubenswrapper[4682]: W1210 10:58:16.846595 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8f49724_e500_4735_9eaa_f28ab2fe7d34.slice/crio-a49c79c8fa6256d4fa925ee0c874fa82ebd6e80df5c582d03ea78de45cf38f65 WatchSource:0}: Error finding container a49c79c8fa6256d4fa925ee0c874fa82ebd6e80df5c582d03ea78de45cf38f65: Status 404 returned error can't find the container with id a49c79c8fa6256d4fa925ee0c874fa82ebd6e80df5c582d03ea78de45cf38f65 Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.968994 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mqkpw"] Dec 10 10:58:16 crc kubenswrapper[4682]: W1210 10:58:16.970151 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ad8f556_5e94_447b_9ec3_cd5c29885e2a.slice/crio-70d35f182e903194ee58f5eacb466260cade4e3b37eaca286f405b9f1214e4ee WatchSource:0}: Error finding container 70d35f182e903194ee58f5eacb466260cade4e3b37eaca286f405b9f1214e4ee: Status 404 returned error can't find the container with id 
70d35f182e903194ee58f5eacb466260cade4e3b37eaca286f405b9f1214e4ee Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.982238 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-mqkpw" event={"ID":"2ad8f556-5e94-447b-9ec3-cd5c29885e2a","Type":"ContainerStarted","Data":"70d35f182e903194ee58f5eacb466260cade4e3b37eaca286f405b9f1214e4ee"} Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.983366 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-zsncd" event={"ID":"34fe5718-bdcd-4e01-8d46-5033469ecee0","Type":"ContainerStarted","Data":"c635d6a7fe615724f2205a3f07cd8f7e1062869c01a2361eb8cb44a760c4d4f3"} Dec 10 10:58:16 crc kubenswrapper[4682]: I1210 10:58:16.984263 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-q7f8s" event={"ID":"e8f49724-e500-4735-9eaa-f28ab2fe7d34","Type":"ContainerStarted","Data":"a49c79c8fa6256d4fa925ee0c874fa82ebd6e80df5c582d03ea78de45cf38f65"} Dec 10 10:58:22 crc kubenswrapper[4682]: I1210 10:58:22.023673 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-mqkpw" event={"ID":"2ad8f556-5e94-447b-9ec3-cd5c29885e2a","Type":"ContainerStarted","Data":"d89dbfd07ac7c1fabe3266da1f503e9f9df3db18981e4781422f2018707128d8"} Dec 10 10:58:22 crc kubenswrapper[4682]: I1210 10:58:22.025110 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-zsncd" event={"ID":"34fe5718-bdcd-4e01-8d46-5033469ecee0","Type":"ContainerStarted","Data":"483c9c80c312eab9c94eb6dd17d4e049dd4863faf1a9912d48dd24b72db6548c"} Dec 10 10:58:22 crc kubenswrapper[4682]: I1210 10:58:22.026702 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-q7f8s" event={"ID":"e8f49724-e500-4735-9eaa-f28ab2fe7d34","Type":"ContainerStarted","Data":"361f06aaa0d1eb7c0acb05a8d1e2aa33afb9f163ff36a3f57f0cefd5306eb8a9"} Dec 10 10:58:22 crc kubenswrapper[4682]: I1210 10:58:22.026834 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-q7f8s" Dec 10 10:58:22 crc kubenswrapper[4682]: I1210 10:58:22.065145 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-mqkpw" podStartSLOduration=2.320846903 podStartE2EDuration="6.065128325s" podCreationTimestamp="2025-12-10 10:58:16 +0000 UTC" firstStartedPulling="2025-12-10 10:58:16.972013428 +0000 UTC m=+777.292224178" lastFinishedPulling="2025-12-10 10:58:20.71629485 +0000 UTC m=+781.036505600" observedRunningTime="2025-12-10 10:58:22.05868908 +0000 UTC m=+782.378899820" watchObservedRunningTime="2025-12-10 10:58:22.065128325 +0000 UTC m=+782.385339075" Dec 10 10:58:22 crc kubenswrapper[4682]: I1210 10:58:22.093032 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-zsncd" podStartSLOduration=2.203107957 podStartE2EDuration="6.093015471s" podCreationTimestamp="2025-12-10 10:58:16 +0000 UTC" firstStartedPulling="2025-12-10 10:58:16.814936362 +0000 UTC m=+777.135147112" lastFinishedPulling="2025-12-10 10:58:20.704843886 +0000 UTC m=+781.025054626" observedRunningTime="2025-12-10 10:58:22.088929962 +0000 UTC m=+782.409140732" watchObservedRunningTime="2025-12-10 10:58:22.093015471 +0000 UTC m=+782.413226221" Dec 10 10:58:22 crc kubenswrapper[4682]: I1210 10:58:22.120712 4682 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-q7f8s" podStartSLOduration=2.25370137 podStartE2EDuration="6.12069647s" podCreationTimestamp="2025-12-10 10:58:16 +0000 UTC" firstStartedPulling="2025-12-10 10:58:16.848572347 +0000 UTC m=+777.168783097" lastFinishedPulling="2025-12-10 10:58:20.715567437 +0000 UTC m=+781.035778197" observedRunningTime="2025-12-10 10:58:22.117098276 +0000 UTC m=+782.437309036" watchObservedRunningTime="2025-12-10 10:58:22.12069647 +0000 UTC m=+782.440907220" Dec 10 10:58:23 crc kubenswrapper[4682]: I1210 10:58:23.036121 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" event={"ID":"690f858b-11ca-4449-89ed-5f3fb287113e","Type":"ContainerStarted","Data":"aa0ddfc343036bf12ee48dd31c54f3aab0c00e5151b804ecf7e3f73ff6648c29"} Dec 10 10:58:23 crc kubenswrapper[4682]: I1210 10:58:23.055514 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" podStartSLOduration=13.694406956 podStartE2EDuration="42.055494393s" podCreationTimestamp="2025-12-10 10:57:41 +0000 UTC" firstStartedPulling="2025-12-10 10:57:53.598877728 +0000 UTC m=+753.919088478" lastFinishedPulling="2025-12-10 10:58:21.959965165 +0000 UTC m=+782.280175915" observedRunningTime="2025-12-10 10:58:23.053584843 +0000 UTC m=+783.373795603" watchObservedRunningTime="2025-12-10 10:58:23.055494393 +0000 UTC m=+783.375705143" Dec 10 10:58:26 crc kubenswrapper[4682]: I1210 10:58:26.601380 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-q7f8s" Dec 10 10:58:32 crc kubenswrapper[4682]: I1210 10:58:32.199734 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:58:32 crc kubenswrapper[4682]: I1210 10:58:32.202675 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-d5rcq" Dec 10 10:58:36 crc kubenswrapper[4682]: I1210 10:58:36.478822 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 10:58:36 crc kubenswrapper[4682]: I1210 10:58:36.479167 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 10:58:36 crc kubenswrapper[4682]: I1210 10:58:36.479217 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 10:58:36 crc kubenswrapper[4682]: I1210 10:58:36.479680 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c6bff78a240d5adae318d431b3e181644756793c403e51687d775ce4fb2cfb9a"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 10:58:36 crc kubenswrapper[4682]: I1210 
10:58:36.479747 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://c6bff78a240d5adae318d431b3e181644756793c403e51687d775ce4fb2cfb9a" gracePeriod=600 Dec 10 10:58:37 crc kubenswrapper[4682]: I1210 10:58:37.112719 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="c6bff78a240d5adae318d431b3e181644756793c403e51687d775ce4fb2cfb9a" exitCode=0 Dec 10 10:58:37 crc kubenswrapper[4682]: I1210 10:58:37.112789 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"c6bff78a240d5adae318d431b3e181644756793c403e51687d775ce4fb2cfb9a"} Dec 10 10:58:37 crc kubenswrapper[4682]: I1210 10:58:37.113321 4682 scope.go:117] "RemoveContainer" containerID="24b3429e3e43a35cd2e6a0d08a5c397cb0299c4d8c6b5f72ac9981458cf65f39" Dec 10 10:58:38 crc kubenswrapper[4682]: I1210 10:58:38.120849 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"cb1f236ceb4d4541ff9535181be092107ce5f587a0c363e01762746593060db5"} Dec 10 10:58:53 crc kubenswrapper[4682]: I1210 10:58:53.493352 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd"] Dec 10 10:58:53 crc kubenswrapper[4682]: I1210 10:58:53.495291 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" Dec 10 10:58:53 crc kubenswrapper[4682]: I1210 10:58:53.496893 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 10 10:58:53 crc kubenswrapper[4682]: I1210 10:58:53.502959 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd"] Dec 10 10:58:53 crc kubenswrapper[4682]: I1210 10:58:53.602394 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6kvk\" (UniqueName: \"kubernetes.io/projected/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-kube-api-access-k6kvk\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd\" (UID: \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" Dec 10 10:58:53 crc kubenswrapper[4682]: I1210 10:58:53.602458 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-util\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd\" (UID: \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" Dec 10 10:58:53 crc kubenswrapper[4682]: I1210 10:58:53.602652 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-bundle\") pod 
\"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd\" (UID: \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" Dec 10 10:58:53 crc kubenswrapper[4682]: I1210 10:58:53.703876 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-bundle\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd\" (UID: \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" Dec 10 10:58:53 crc kubenswrapper[4682]: I1210 10:58:53.703983 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6kvk\" (UniqueName: \"kubernetes.io/projected/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-kube-api-access-k6kvk\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd\" (UID: \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" Dec 10 10:58:53 crc kubenswrapper[4682]: I1210 10:58:53.704013 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-util\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd\" (UID: \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" Dec 10 10:58:53 crc kubenswrapper[4682]: I1210 10:58:53.704441 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-bundle\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd\" (UID: \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" Dec 10 10:58:53 crc kubenswrapper[4682]: I1210 10:58:53.704496 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-util\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd\" (UID: \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" Dec 10 10:58:53 crc kubenswrapper[4682]: I1210 10:58:53.723281 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6kvk\" (UniqueName: \"kubernetes.io/projected/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-kube-api-access-k6kvk\") pod \"7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd\" (UID: \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\") " pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" Dec 10 10:58:53 crc kubenswrapper[4682]: I1210 10:58:53.821058 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" Dec 10 10:58:54 crc kubenswrapper[4682]: I1210 10:58:54.231056 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd"] Dec 10 10:58:55 crc kubenswrapper[4682]: I1210 10:58:55.214324 4682 generic.go:334] "Generic (PLEG): container finished" podID="e52fc6a0-640e-4c38-b90d-93faeb8b8b7b" containerID="eb265abeaa89a0cf940a50d0c23c2a4a7b7dd8523c4513d24a5f8f0db0720129" exitCode=0 Dec 10 10:58:55 crc kubenswrapper[4682]: I1210 10:58:55.214395 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" event={"ID":"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b","Type":"ContainerDied","Data":"eb265abeaa89a0cf940a50d0c23c2a4a7b7dd8523c4513d24a5f8f0db0720129"} Dec 10 10:58:55 crc kubenswrapper[4682]: I1210 10:58:55.214505 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" event={"ID":"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b","Type":"ContainerStarted","Data":"89d9a37d991af45be2e44ef4225789e3b77d9eec2c2945aad61425d744af7ad5"} Dec 10 10:58:55 crc kubenswrapper[4682]: I1210 10:58:55.833491 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-w85mw"] Dec 10 10:58:55 crc kubenswrapper[4682]: I1210 10:58:55.834922 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:58:55 crc kubenswrapper[4682]: I1210 10:58:55.879508 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w85mw"] Dec 10 10:58:55 crc kubenswrapper[4682]: I1210 10:58:55.930262 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1865d09a-4c60-4370-9cf4-378d20749b59-catalog-content\") pod \"redhat-operators-w85mw\" (UID: \"1865d09a-4c60-4370-9cf4-378d20749b59\") " pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:58:55 crc kubenswrapper[4682]: I1210 10:58:55.930326 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1865d09a-4c60-4370-9cf4-378d20749b59-utilities\") pod \"redhat-operators-w85mw\" (UID: \"1865d09a-4c60-4370-9cf4-378d20749b59\") " pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:58:55 crc kubenswrapper[4682]: I1210 10:58:55.930407 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lxrz\" (UniqueName: \"kubernetes.io/projected/1865d09a-4c60-4370-9cf4-378d20749b59-kube-api-access-7lxrz\") pod \"redhat-operators-w85mw\" (UID: \"1865d09a-4c60-4370-9cf4-378d20749b59\") " pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:58:56 crc kubenswrapper[4682]: I1210 10:58:56.034219 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1865d09a-4c60-4370-9cf4-378d20749b59-utilities\") pod \"redhat-operators-w85mw\" (UID: \"1865d09a-4c60-4370-9cf4-378d20749b59\") " pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:58:56 crc kubenswrapper[4682]: I1210 10:58:56.034306 4682 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-7lxrz\" (UniqueName: \"kubernetes.io/projected/1865d09a-4c60-4370-9cf4-378d20749b59-kube-api-access-7lxrz\") pod \"redhat-operators-w85mw\" (UID: \"1865d09a-4c60-4370-9cf4-378d20749b59\") " pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:58:56 crc kubenswrapper[4682]: I1210 10:58:56.034376 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1865d09a-4c60-4370-9cf4-378d20749b59-catalog-content\") pod \"redhat-operators-w85mw\" (UID: \"1865d09a-4c60-4370-9cf4-378d20749b59\") " pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:58:56 crc kubenswrapper[4682]: I1210 10:58:56.034894 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1865d09a-4c60-4370-9cf4-378d20749b59-utilities\") pod \"redhat-operators-w85mw\" (UID: \"1865d09a-4c60-4370-9cf4-378d20749b59\") " pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:58:56 crc kubenswrapper[4682]: I1210 10:58:56.034945 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1865d09a-4c60-4370-9cf4-378d20749b59-catalog-content\") pod \"redhat-operators-w85mw\" (UID: \"1865d09a-4c60-4370-9cf4-378d20749b59\") " pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:58:56 crc kubenswrapper[4682]: I1210 10:58:56.062644 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lxrz\" (UniqueName: \"kubernetes.io/projected/1865d09a-4c60-4370-9cf4-378d20749b59-kube-api-access-7lxrz\") pod \"redhat-operators-w85mw\" (UID: \"1865d09a-4c60-4370-9cf4-378d20749b59\") " pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:58:56 crc kubenswrapper[4682]: I1210 10:58:56.155860 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:58:56 crc kubenswrapper[4682]: I1210 10:58:56.437290 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w85mw"] Dec 10 10:58:56 crc kubenswrapper[4682]: W1210 10:58:56.454223 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1865d09a_4c60_4370_9cf4_378d20749b59.slice/crio-f0e68625761e1a987dbfa7ef81ab218d105c544c9eff0b8a364181a4efdeb1fc WatchSource:0}: Error finding container f0e68625761e1a987dbfa7ef81ab218d105c544c9eff0b8a364181a4efdeb1fc: Status 404 returned error can't find the container with id f0e68625761e1a987dbfa7ef81ab218d105c544c9eff0b8a364181a4efdeb1fc Dec 10 10:58:56 crc kubenswrapper[4682]: I1210 10:58:56.893521 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"] Dec 10 10:58:56 crc kubenswrapper[4682]: I1210 10:58:56.894448 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="minio-dev/minio" Dec 10 10:58:56 crc kubenswrapper[4682]: I1210 10:58:56.898787 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt" Dec 10 10:58:56 crc kubenswrapper[4682]: I1210 10:58:56.899652 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt" Dec 10 10:58:56 crc kubenswrapper[4682]: I1210 10:58:56.910341 4682 reflector.go:368] Caches populated for *v1.Secret from object-"minio-dev"/"default-dockercfg-s69mn" Dec 10 10:58:56 crc kubenswrapper[4682]: I1210 10:58:56.910563 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.048151 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bf6tq\" (UniqueName: \"kubernetes.io/projected/e378f78e-b66e-43c9-94ee-2e456e7eff1c-kube-api-access-bf6tq\") pod \"minio\" (UID: \"e378f78e-b66e-43c9-94ee-2e456e7eff1c\") " pod="minio-dev/minio" Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.048219 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-3cecb090-7822-4d6d-8f2f-697a197c533c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3cecb090-7822-4d6d-8f2f-697a197c533c\") pod \"minio\" (UID: \"e378f78e-b66e-43c9-94ee-2e456e7eff1c\") " pod="minio-dev/minio" Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.149748 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bf6tq\" (UniqueName: \"kubernetes.io/projected/e378f78e-b66e-43c9-94ee-2e456e7eff1c-kube-api-access-bf6tq\") pod \"minio\" (UID: \"e378f78e-b66e-43c9-94ee-2e456e7eff1c\") " pod="minio-dev/minio" Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.149819 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-3cecb090-7822-4d6d-8f2f-697a197c533c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3cecb090-7822-4d6d-8f2f-697a197c533c\") pod \"minio\" (UID: \"e378f78e-b66e-43c9-94ee-2e456e7eff1c\") " pod="minio-dev/minio" Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.154311 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.154350 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-3cecb090-7822-4d6d-8f2f-697a197c533c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3cecb090-7822-4d6d-8f2f-697a197c533c\") pod \"minio\" (UID: \"e378f78e-b66e-43c9-94ee-2e456e7eff1c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/bd13cf9363a103986de0467bf07b3f7f2ee4859770f24ee38535fa8bf4ab229e/globalmount\"" pod="minio-dev/minio" Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.173881 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bf6tq\" (UniqueName: \"kubernetes.io/projected/e378f78e-b66e-43c9-94ee-2e456e7eff1c-kube-api-access-bf6tq\") pod \"minio\" (UID: \"e378f78e-b66e-43c9-94ee-2e456e7eff1c\") " pod="minio-dev/minio" Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.227132 4682 generic.go:334] "Generic (PLEG): container finished" podID="e52fc6a0-640e-4c38-b90d-93faeb8b8b7b" containerID="dd27d9285b4f11de4512defa4e1963395a7a9e16018a08211f8e11caced35265" exitCode=0 Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.227211 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" event={"ID":"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b","Type":"ContainerDied","Data":"dd27d9285b4f11de4512defa4e1963395a7a9e16018a08211f8e11caced35265"} Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.229407 4682 generic.go:334] "Generic (PLEG): container finished" podID="1865d09a-4c60-4370-9cf4-378d20749b59" containerID="63325a9a4c0f0b29db14ef84cec9d0ec3d327b30bda618cfed0f07c1f4a62f80" exitCode=0 Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.229459 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w85mw" event={"ID":"1865d09a-4c60-4370-9cf4-378d20749b59","Type":"ContainerDied","Data":"63325a9a4c0f0b29db14ef84cec9d0ec3d327b30bda618cfed0f07c1f4a62f80"} Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.229553 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w85mw" event={"ID":"1865d09a-4c60-4370-9cf4-378d20749b59","Type":"ContainerStarted","Data":"f0e68625761e1a987dbfa7ef81ab218d105c544c9eff0b8a364181a4efdeb1fc"} Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.230256 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-3cecb090-7822-4d6d-8f2f-697a197c533c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3cecb090-7822-4d6d-8f2f-697a197c533c\") pod \"minio\" (UID: \"e378f78e-b66e-43c9-94ee-2e456e7eff1c\") " pod="minio-dev/minio" Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.507187 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="minio-dev/minio" Dec 10 10:58:57 crc kubenswrapper[4682]: I1210 10:58:57.894590 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Dec 10 10:58:57 crc kubenswrapper[4682]: W1210 10:58:57.899547 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode378f78e_b66e_43c9_94ee_2e456e7eff1c.slice/crio-101c660ed277d2882c75fb4ea74fa182a975d1f131e44a7604c6d048d31560d7 WatchSource:0}: Error finding container 101c660ed277d2882c75fb4ea74fa182a975d1f131e44a7604c6d048d31560d7: Status 404 returned error can't find the container with id 101c660ed277d2882c75fb4ea74fa182a975d1f131e44a7604c6d048d31560d7 Dec 10 10:58:58 crc kubenswrapper[4682]: I1210 10:58:58.235046 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"e378f78e-b66e-43c9-94ee-2e456e7eff1c","Type":"ContainerStarted","Data":"101c660ed277d2882c75fb4ea74fa182a975d1f131e44a7604c6d048d31560d7"} Dec 10 10:58:58 crc kubenswrapper[4682]: I1210 10:58:58.236826 4682 generic.go:334] "Generic (PLEG): container finished" podID="e52fc6a0-640e-4c38-b90d-93faeb8b8b7b" containerID="1a11e78467ddf1b4632d370c66bf981eb1322018a060b56eb61dbe631cebfd40" exitCode=0 Dec 10 10:58:58 crc kubenswrapper[4682]: I1210 10:58:58.236858 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" event={"ID":"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b","Type":"ContainerDied","Data":"1a11e78467ddf1b4632d370c66bf981eb1322018a060b56eb61dbe631cebfd40"} Dec 10 10:58:59 crc kubenswrapper[4682]: I1210 10:58:59.974892 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" Dec 10 10:59:00 crc kubenswrapper[4682]: I1210 10:59:00.120612 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6kvk\" (UniqueName: \"kubernetes.io/projected/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-kube-api-access-k6kvk\") pod \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\" (UID: \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\") " Dec 10 10:59:00 crc kubenswrapper[4682]: I1210 10:59:00.121007 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-util\") pod \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\" (UID: \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\") " Dec 10 10:59:00 crc kubenswrapper[4682]: I1210 10:59:00.121074 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-bundle\") pod \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\" (UID: \"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b\") " Dec 10 10:59:00 crc kubenswrapper[4682]: I1210 10:59:00.122363 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-bundle" (OuterVolumeSpecName: "bundle") pod "e52fc6a0-640e-4c38-b90d-93faeb8b8b7b" (UID: "e52fc6a0-640e-4c38-b90d-93faeb8b8b7b"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:59:00 crc kubenswrapper[4682]: I1210 10:59:00.145728 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-kube-api-access-k6kvk" (OuterVolumeSpecName: "kube-api-access-k6kvk") pod "e52fc6a0-640e-4c38-b90d-93faeb8b8b7b" (UID: "e52fc6a0-640e-4c38-b90d-93faeb8b8b7b"). InnerVolumeSpecName "kube-api-access-k6kvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:59:00 crc kubenswrapper[4682]: I1210 10:59:00.222573 4682 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 10:59:00 crc kubenswrapper[4682]: I1210 10:59:00.222611 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6kvk\" (UniqueName: \"kubernetes.io/projected/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-kube-api-access-k6kvk\") on node \"crc\" DevicePath \"\"" Dec 10 10:59:00 crc kubenswrapper[4682]: I1210 10:59:00.251424 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" event={"ID":"e52fc6a0-640e-4c38-b90d-93faeb8b8b7b","Type":"ContainerDied","Data":"89d9a37d991af45be2e44ef4225789e3b77d9eec2c2945aad61425d744af7ad5"} Dec 10 10:59:00 crc kubenswrapper[4682]: I1210 10:59:00.251491 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="89d9a37d991af45be2e44ef4225789e3b77d9eec2c2945aad61425d744af7ad5" Dec 10 10:59:00 crc kubenswrapper[4682]: I1210 10:59:00.251572 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd" Dec 10 10:59:00 crc kubenswrapper[4682]: I1210 10:59:00.587860 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-util" (OuterVolumeSpecName: "util") pod "e52fc6a0-640e-4c38-b90d-93faeb8b8b7b" (UID: "e52fc6a0-640e-4c38-b90d-93faeb8b8b7b"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:59:00 crc kubenswrapper[4682]: I1210 10:59:00.628504 4682 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e52fc6a0-640e-4c38-b90d-93faeb8b8b7b-util\") on node \"crc\" DevicePath \"\"" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.530566 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8"] Dec 10 10:59:05 crc kubenswrapper[4682]: E1210 10:59:05.531405 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e52fc6a0-640e-4c38-b90d-93faeb8b8b7b" containerName="util" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.531420 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="e52fc6a0-640e-4c38-b90d-93faeb8b8b7b" containerName="util" Dec 10 10:59:05 crc kubenswrapper[4682]: E1210 10:59:05.531431 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e52fc6a0-640e-4c38-b90d-93faeb8b8b7b" containerName="extract" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.531439 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="e52fc6a0-640e-4c38-b90d-93faeb8b8b7b" containerName="extract" Dec 10 10:59:05 crc kubenswrapper[4682]: E1210 10:59:05.531460 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e52fc6a0-640e-4c38-b90d-93faeb8b8b7b" containerName="pull" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.531482 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="e52fc6a0-640e-4c38-b90d-93faeb8b8b7b" containerName="pull" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.531603 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="e52fc6a0-640e-4c38-b90d-93faeb8b8b7b" containerName="extract" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.532346 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.535832 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.535889 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.536989 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.537106 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.537492 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.537883 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-zj6xr" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.557110 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8"] Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.691242 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/351331cd-a02a-4356-9143-325ba6a4c72a-manager-config\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.691376 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dr5z\" (UniqueName: \"kubernetes.io/projected/351331cd-a02a-4356-9143-325ba6a4c72a-kube-api-access-5dr5z\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.691459 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/351331cd-a02a-4356-9143-325ba6a4c72a-webhook-cert\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.691560 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/351331cd-a02a-4356-9143-325ba6a4c72a-apiservice-cert\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.691603 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/351331cd-a02a-4356-9143-325ba6a4c72a-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.793565 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/351331cd-a02a-4356-9143-325ba6a4c72a-manager-config\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.793629 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dr5z\" (UniqueName: \"kubernetes.io/projected/351331cd-a02a-4356-9143-325ba6a4c72a-kube-api-access-5dr5z\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.793663 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/351331cd-a02a-4356-9143-325ba6a4c72a-webhook-cert\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.793698 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/351331cd-a02a-4356-9143-325ba6a4c72a-apiservice-cert\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.793718 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/351331cd-a02a-4356-9143-325ba6a4c72a-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.795570 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/351331cd-a02a-4356-9143-325ba6a4c72a-manager-config\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.800920 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/351331cd-a02a-4356-9143-325ba6a4c72a-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.801577 4682 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/351331cd-a02a-4356-9143-325ba6a4c72a-apiservice-cert\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.802351 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/351331cd-a02a-4356-9143-325ba6a4c72a-webhook-cert\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.833126 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5dr5z\" (UniqueName: \"kubernetes.io/projected/351331cd-a02a-4356-9143-325ba6a4c72a-kube-api-access-5dr5z\") pod \"loki-operator-controller-manager-7fcbf8fdb4-m96c8\" (UID: \"351331cd-a02a-4356-9143-325ba6a4c72a\") " pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:05 crc kubenswrapper[4682]: I1210 10:59:05.847849 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:10 crc kubenswrapper[4682]: I1210 10:59:10.081218 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8"] Dec 10 10:59:10 crc kubenswrapper[4682]: W1210 10:59:10.081524 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod351331cd_a02a_4356_9143_325ba6a4c72a.slice/crio-430ca607d5ac0e75769cee93bd269edc867f30d5ddd0a11c637cf1835693890f WatchSource:0}: Error finding container 430ca607d5ac0e75769cee93bd269edc867f30d5ddd0a11c637cf1835693890f: Status 404 returned error can't find the container with id 430ca607d5ac0e75769cee93bd269edc867f30d5ddd0a11c637cf1835693890f Dec 10 10:59:10 crc kubenswrapper[4682]: I1210 10:59:10.325337 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w85mw" event={"ID":"1865d09a-4c60-4370-9cf4-378d20749b59","Type":"ContainerStarted","Data":"f3ac0e38acf8cf89e14c3d754184062a4d669fd388388081488d31cf3db2e4f4"} Dec 10 10:59:10 crc kubenswrapper[4682]: I1210 10:59:10.326820 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"e378f78e-b66e-43c9-94ee-2e456e7eff1c","Type":"ContainerStarted","Data":"5271b23f5c44680c0758f664f056bea28e99cafcd55bdc82ea343acbe9d40707"} Dec 10 10:59:10 crc kubenswrapper[4682]: I1210 10:59:10.327870 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" event={"ID":"351331cd-a02a-4356-9143-325ba6a4c72a","Type":"ContainerStarted","Data":"430ca607d5ac0e75769cee93bd269edc867f30d5ddd0a11c637cf1835693890f"} Dec 10 10:59:10 crc kubenswrapper[4682]: I1210 10:59:10.361456 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=4.339124663 podStartE2EDuration="16.361434598s" podCreationTimestamp="2025-12-10 10:58:54 +0000 UTC" firstStartedPulling="2025-12-10 10:58:57.902188224 +0000 UTC m=+818.222398974" lastFinishedPulling="2025-12-10 
10:59:09.924498159 +0000 UTC m=+830.244708909" observedRunningTime="2025-12-10 10:59:10.359371503 +0000 UTC m=+830.679582253" watchObservedRunningTime="2025-12-10 10:59:10.361434598 +0000 UTC m=+830.681645348" Dec 10 10:59:11 crc kubenswrapper[4682]: I1210 10:59:11.334577 4682 generic.go:334] "Generic (PLEG): container finished" podID="1865d09a-4c60-4370-9cf4-378d20749b59" containerID="f3ac0e38acf8cf89e14c3d754184062a4d669fd388388081488d31cf3db2e4f4" exitCode=0 Dec 10 10:59:11 crc kubenswrapper[4682]: I1210 10:59:11.334697 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w85mw" event={"ID":"1865d09a-4c60-4370-9cf4-378d20749b59","Type":"ContainerDied","Data":"f3ac0e38acf8cf89e14c3d754184062a4d669fd388388081488d31cf3db2e4f4"} Dec 10 10:59:15 crc kubenswrapper[4682]: I1210 10:59:15.354656 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w85mw" event={"ID":"1865d09a-4c60-4370-9cf4-378d20749b59","Type":"ContainerStarted","Data":"1ea2dcc89765067a83f34d0bb2936566203ad7a5eafb5e3e76db57c718c0b46c"} Dec 10 10:59:15 crc kubenswrapper[4682]: I1210 10:59:15.378996 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-w85mw" podStartSLOduration=2.596221076 podStartE2EDuration="20.378977497s" podCreationTimestamp="2025-12-10 10:58:55 +0000 UTC" firstStartedPulling="2025-12-10 10:58:57.230688245 +0000 UTC m=+817.550898995" lastFinishedPulling="2025-12-10 10:59:15.013444636 +0000 UTC m=+835.333655416" observedRunningTime="2025-12-10 10:59:15.373818683 +0000 UTC m=+835.694029443" watchObservedRunningTime="2025-12-10 10:59:15.378977497 +0000 UTC m=+835.699188267" Dec 10 10:59:16 crc kubenswrapper[4682]: I1210 10:59:16.156295 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:59:16 crc kubenswrapper[4682]: I1210 10:59:16.156696 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:59:17 crc kubenswrapper[4682]: I1210 10:59:17.220095 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-w85mw" podUID="1865d09a-4c60-4370-9cf4-378d20749b59" containerName="registry-server" probeResult="failure" output=< Dec 10 10:59:17 crc kubenswrapper[4682]: timeout: failed to connect service ":50051" within 1s Dec 10 10:59:17 crc kubenswrapper[4682]: > Dec 10 10:59:19 crc kubenswrapper[4682]: I1210 10:59:19.382681 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" event={"ID":"351331cd-a02a-4356-9143-325ba6a4c72a","Type":"ContainerStarted","Data":"3cddc2d0a1148a9552734d4631ddabc4fda824cb24c4d2811bb90d92e4005f31"} Dec 10 10:59:24 crc kubenswrapper[4682]: I1210 10:59:24.407759 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" event={"ID":"351331cd-a02a-4356-9143-325ba6a4c72a","Type":"ContainerStarted","Data":"1367dd5e3f6266b84d4df4dc905bc5cc591b811eedb62f09bcfc0fcff3616a22"} Dec 10 10:59:24 crc kubenswrapper[4682]: I1210 10:59:24.409786 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:24 crc kubenswrapper[4682]: I1210 10:59:24.411245 4682 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" Dec 10 10:59:24 crc kubenswrapper[4682]: I1210 10:59:24.429934 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-7fcbf8fdb4-m96c8" podStartSLOduration=5.346893665 podStartE2EDuration="19.429919699s" podCreationTimestamp="2025-12-10 10:59:05 +0000 UTC" firstStartedPulling="2025-12-10 10:59:10.084972986 +0000 UTC m=+830.405183736" lastFinishedPulling="2025-12-10 10:59:24.16799902 +0000 UTC m=+844.488209770" observedRunningTime="2025-12-10 10:59:24.429828696 +0000 UTC m=+844.750039446" watchObservedRunningTime="2025-12-10 10:59:24.429919699 +0000 UTC m=+844.750130449" Dec 10 10:59:26 crc kubenswrapper[4682]: I1210 10:59:26.203971 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:59:26 crc kubenswrapper[4682]: I1210 10:59:26.256333 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-w85mw" Dec 10 10:59:27 crc kubenswrapper[4682]: I1210 10:59:27.655047 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w85mw"] Dec 10 10:59:28 crc kubenswrapper[4682]: I1210 10:59:28.026510 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-479t2"] Dec 10 10:59:28 crc kubenswrapper[4682]: I1210 10:59:28.026792 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-479t2" podUID="a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" containerName="registry-server" containerID="cri-o://47c6fb96bca99b4776878f3afc716b545aba74845a9555288fd1f84bb4e5340a" gracePeriod=2 Dec 10 10:59:29 crc kubenswrapper[4682]: I1210 10:59:29.437995 4682 generic.go:334] "Generic (PLEG): container finished" podID="a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" containerID="47c6fb96bca99b4776878f3afc716b545aba74845a9555288fd1f84bb4e5340a" exitCode=0 Dec 10 10:59:29 crc kubenswrapper[4682]: I1210 10:59:29.438294 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-479t2" event={"ID":"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c","Type":"ContainerDied","Data":"47c6fb96bca99b4776878f3afc716b545aba74845a9555288fd1f84bb4e5340a"} Dec 10 10:59:29 crc kubenswrapper[4682]: I1210 10:59:29.492390 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:59:29 crc kubenswrapper[4682]: I1210 10:59:29.634865 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-catalog-content\") pod \"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\" (UID: \"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\") " Dec 10 10:59:29 crc kubenswrapper[4682]: I1210 10:59:29.634961 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-utilities\") pod \"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\" (UID: \"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\") " Dec 10 10:59:29 crc kubenswrapper[4682]: I1210 10:59:29.634998 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gw6mf\" (UniqueName: \"kubernetes.io/projected/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-kube-api-access-gw6mf\") pod \"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\" (UID: \"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c\") " Dec 10 10:59:29 crc kubenswrapper[4682]: I1210 10:59:29.635794 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-utilities" (OuterVolumeSpecName: "utilities") pod "a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" (UID: "a7b64e32-864b-4ea9-b1b1-1aec3c503a8c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:59:29 crc kubenswrapper[4682]: I1210 10:59:29.644757 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-kube-api-access-gw6mf" (OuterVolumeSpecName: "kube-api-access-gw6mf") pod "a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" (UID: "a7b64e32-864b-4ea9-b1b1-1aec3c503a8c"). InnerVolumeSpecName "kube-api-access-gw6mf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 10:59:29 crc kubenswrapper[4682]: I1210 10:59:29.736136 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 10:59:29 crc kubenswrapper[4682]: I1210 10:59:29.736175 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gw6mf\" (UniqueName: \"kubernetes.io/projected/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-kube-api-access-gw6mf\") on node \"crc\" DevicePath \"\"" Dec 10 10:59:29 crc kubenswrapper[4682]: I1210 10:59:29.765391 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" (UID: "a7b64e32-864b-4ea9-b1b1-1aec3c503a8c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 10:59:29 crc kubenswrapper[4682]: I1210 10:59:29.837799 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 10:59:30 crc kubenswrapper[4682]: I1210 10:59:30.445153 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-479t2" event={"ID":"a7b64e32-864b-4ea9-b1b1-1aec3c503a8c","Type":"ContainerDied","Data":"d7f2ef751cf7defa2453903e710eee3da5e068d120c9ad1dc58f37efc9de0184"} Dec 10 10:59:30 crc kubenswrapper[4682]: I1210 10:59:30.445211 4682 scope.go:117] "RemoveContainer" containerID="47c6fb96bca99b4776878f3afc716b545aba74845a9555288fd1f84bb4e5340a" Dec 10 10:59:30 crc kubenswrapper[4682]: I1210 10:59:30.445247 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-479t2" Dec 10 10:59:30 crc kubenswrapper[4682]: I1210 10:59:30.460307 4682 scope.go:117] "RemoveContainer" containerID="cfda238df6e257e2530ca90a85fbae3ab29043cab5afa397f43a51bf75b85f4f" Dec 10 10:59:30 crc kubenswrapper[4682]: I1210 10:59:30.463712 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-479t2"] Dec 10 10:59:30 crc kubenswrapper[4682]: I1210 10:59:30.470842 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-479t2"] Dec 10 10:59:30 crc kubenswrapper[4682]: I1210 10:59:30.478166 4682 scope.go:117] "RemoveContainer" containerID="909ff250f15e146e0c5387b4282017f9c03d92e6e459ba45e0601f81b6fb6355" Dec 10 10:59:32 crc kubenswrapper[4682]: I1210 10:59:32.388340 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" path="/var/lib/kubelet/pods/a7b64e32-864b-4ea9-b1b1-1aec3c503a8c/volumes" Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.640419 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-m9zr5"] Dec 10 10:59:47 crc kubenswrapper[4682]: E1210 10:59:47.641293 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" containerName="extract-utilities" Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.641311 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" containerName="extract-utilities" Dec 10 10:59:47 crc kubenswrapper[4682]: E1210 10:59:47.641327 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" containerName="registry-server" Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.641336 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" containerName="registry-server" Dec 10 10:59:47 crc kubenswrapper[4682]: E1210 10:59:47.641363 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" containerName="extract-content" Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.641372 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" containerName="extract-content" Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.641512 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7b64e32-864b-4ea9-b1b1-1aec3c503a8c" containerName="registry-server" Dec 10 10:59:47 crc 
kubenswrapper[4682]: I1210 10:59:47.642503 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m9zr5" Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.652543 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-m9zr5"] Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.767533 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-utilities\") pod \"community-operators-m9zr5\" (UID: \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\") " pod="openshift-marketplace/community-operators-m9zr5" Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.767611 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjpnp\" (UniqueName: \"kubernetes.io/projected/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-kube-api-access-qjpnp\") pod \"community-operators-m9zr5\" (UID: \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\") " pod="openshift-marketplace/community-operators-m9zr5" Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.767807 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-catalog-content\") pod \"community-operators-m9zr5\" (UID: \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\") " pod="openshift-marketplace/community-operators-m9zr5" Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.869478 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-catalog-content\") pod \"community-operators-m9zr5\" (UID: \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\") " pod="openshift-marketplace/community-operators-m9zr5" Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.869578 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-utilities\") pod \"community-operators-m9zr5\" (UID: \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\") " pod="openshift-marketplace/community-operators-m9zr5" Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.869718 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjpnp\" (UniqueName: \"kubernetes.io/projected/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-kube-api-access-qjpnp\") pod \"community-operators-m9zr5\" (UID: \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\") " pod="openshift-marketplace/community-operators-m9zr5" Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.869942 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-catalog-content\") pod \"community-operators-m9zr5\" (UID: \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\") " pod="openshift-marketplace/community-operators-m9zr5" Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.870013 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-utilities\") pod \"community-operators-m9zr5\" (UID: \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\") " pod="openshift-marketplace/community-operators-m9zr5" Dec 
10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.890622 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjpnp\" (UniqueName: \"kubernetes.io/projected/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-kube-api-access-qjpnp\") pod \"community-operators-m9zr5\" (UID: \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\") " pod="openshift-marketplace/community-operators-m9zr5" Dec 10 10:59:47 crc kubenswrapper[4682]: I1210 10:59:47.970872 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m9zr5" Dec 10 10:59:48 crc kubenswrapper[4682]: I1210 10:59:48.225308 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-m9zr5"] Dec 10 10:59:48 crc kubenswrapper[4682]: I1210 10:59:48.553005 4682 generic.go:334] "Generic (PLEG): container finished" podID="a0737ecd-8d57-44e2-ac69-f9fb9f884dba" containerID="685a5bf2a351c7497aa064115d30ed8c0432757030e87819b82cca95aacb06f1" exitCode=0 Dec 10 10:59:48 crc kubenswrapper[4682]: I1210 10:59:48.553053 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m9zr5" event={"ID":"a0737ecd-8d57-44e2-ac69-f9fb9f884dba","Type":"ContainerDied","Data":"685a5bf2a351c7497aa064115d30ed8c0432757030e87819b82cca95aacb06f1"} Dec 10 10:59:48 crc kubenswrapper[4682]: I1210 10:59:48.553085 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m9zr5" event={"ID":"a0737ecd-8d57-44e2-ac69-f9fb9f884dba","Type":"ContainerStarted","Data":"a053377c2cf3bdb4cff0b7f8b68044693bf95765c6f944faa3966b20cf4c2a2e"} Dec 10 10:59:50 crc kubenswrapper[4682]: I1210 10:59:50.566326 4682 generic.go:334] "Generic (PLEG): container finished" podID="a0737ecd-8d57-44e2-ac69-f9fb9f884dba" containerID="6a84bedf3edefac09161b5bd3e0a81c5a4c07f1098583d7e01108d4ef9d6f235" exitCode=0 Dec 10 10:59:50 crc kubenswrapper[4682]: I1210 10:59:50.566381 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m9zr5" event={"ID":"a0737ecd-8d57-44e2-ac69-f9fb9f884dba","Type":"ContainerDied","Data":"6a84bedf3edefac09161b5bd3e0a81c5a4c07f1098583d7e01108d4ef9d6f235"} Dec 10 10:59:51 crc kubenswrapper[4682]: I1210 10:59:51.580592 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m9zr5" event={"ID":"a0737ecd-8d57-44e2-ac69-f9fb9f884dba","Type":"ContainerStarted","Data":"b867aa2bb4d246ed01b244e85dcec0ccf4c39dc3e0296a5cebe92d9db43cc915"} Dec 10 10:59:57 crc kubenswrapper[4682]: I1210 10:59:57.972252 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-m9zr5" Dec 10 10:59:57 crc kubenswrapper[4682]: I1210 10:59:57.972903 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-m9zr5" Dec 10 10:59:58 crc kubenswrapper[4682]: I1210 10:59:58.009871 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-m9zr5" Dec 10 10:59:58 crc kubenswrapper[4682]: I1210 10:59:58.028418 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-m9zr5" podStartSLOduration=8.522477739 podStartE2EDuration="11.028398533s" podCreationTimestamp="2025-12-10 10:59:47 +0000 UTC" firstStartedPulling="2025-12-10 10:59:48.554696565 +0000 UTC m=+868.874907335" 
lastFinishedPulling="2025-12-10 10:59:51.060617339 +0000 UTC m=+871.380828129" observedRunningTime="2025-12-10 10:59:51.607381077 +0000 UTC m=+871.927591837" watchObservedRunningTime="2025-12-10 10:59:58.028398533 +0000 UTC m=+878.348609303" Dec 10 10:59:58 crc kubenswrapper[4682]: I1210 10:59:58.659062 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-m9zr5" Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.165582 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h"] Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.166533 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.168679 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.175713 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h"] Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.204728 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.225581 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rw9d8\" (UniqueName: \"kubernetes.io/projected/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-kube-api-access-rw9d8\") pod \"collect-profiles-29422740-l6l8h\" (UID: \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.225686 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-config-volume\") pod \"collect-profiles-29422740-l6l8h\" (UID: \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.225712 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-secret-volume\") pod \"collect-profiles-29422740-l6l8h\" (UID: \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.326663 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-config-volume\") pod \"collect-profiles-29422740-l6l8h\" (UID: \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.326711 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-secret-volume\") pod \"collect-profiles-29422740-l6l8h\" (UID: \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.326764 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rw9d8\" (UniqueName: \"kubernetes.io/projected/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-kube-api-access-rw9d8\") pod \"collect-profiles-29422740-l6l8h\" (UID: \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.327655 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-config-volume\") pod \"collect-profiles-29422740-l6l8h\" (UID: \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.333289 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-secret-volume\") pod \"collect-profiles-29422740-l6l8h\" (UID: \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.345284 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rw9d8\" (UniqueName: \"kubernetes.io/projected/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-kube-api-access-rw9d8\") pod \"collect-profiles-29422740-l6l8h\" (UID: \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.425583 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-m9zr5"] Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.511101 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.627361 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-m9zr5" podUID="a0737ecd-8d57-44e2-ac69-f9fb9f884dba" containerName="registry-server" containerID="cri-o://b867aa2bb4d246ed01b244e85dcec0ccf4c39dc3e0296a5cebe92d9db43cc915" gracePeriod=2 Dec 10 11:00:00 crc kubenswrapper[4682]: I1210 11:00:00.915923 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h"] Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.536255 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-m9zr5" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.540395 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-catalog-content\") pod \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\" (UID: \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\") " Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.540453 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qjpnp\" (UniqueName: \"kubernetes.io/projected/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-kube-api-access-qjpnp\") pod \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\" (UID: \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\") " Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.540506 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-utilities\") pod \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\" (UID: \"a0737ecd-8d57-44e2-ac69-f9fb9f884dba\") " Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.541492 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-utilities" (OuterVolumeSpecName: "utilities") pod "a0737ecd-8d57-44e2-ac69-f9fb9f884dba" (UID: "a0737ecd-8d57-44e2-ac69-f9fb9f884dba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.544620 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-kube-api-access-qjpnp" (OuterVolumeSpecName: "kube-api-access-qjpnp") pod "a0737ecd-8d57-44e2-ac69-f9fb9f884dba" (UID: "a0737ecd-8d57-44e2-ac69-f9fb9f884dba"). InnerVolumeSpecName "kube-api-access-qjpnp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.594767 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a0737ecd-8d57-44e2-ac69-f9fb9f884dba" (UID: "a0737ecd-8d57-44e2-ac69-f9fb9f884dba"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.633588 4682 generic.go:334] "Generic (PLEG): container finished" podID="edb14eb0-e25a-46c3-b8da-601fd04ad0a1" containerID="752892be936f60f0a96523b9422010c7b4158e5eaee9bdb8bbf30d7bed6fb485" exitCode=0 Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.633662 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" event={"ID":"edb14eb0-e25a-46c3-b8da-601fd04ad0a1","Type":"ContainerDied","Data":"752892be936f60f0a96523b9422010c7b4158e5eaee9bdb8bbf30d7bed6fb485"} Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.633689 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" event={"ID":"edb14eb0-e25a-46c3-b8da-601fd04ad0a1","Type":"ContainerStarted","Data":"a53f3b0e0b9b39038ba50c78bf1ce131a552d305764b6d13d1f5c26ca55a3f5c"} Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.635947 4682 generic.go:334] "Generic (PLEG): container finished" podID="a0737ecd-8d57-44e2-ac69-f9fb9f884dba" containerID="b867aa2bb4d246ed01b244e85dcec0ccf4c39dc3e0296a5cebe92d9db43cc915" exitCode=0 Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.635995 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m9zr5" event={"ID":"a0737ecd-8d57-44e2-ac69-f9fb9f884dba","Type":"ContainerDied","Data":"b867aa2bb4d246ed01b244e85dcec0ccf4c39dc3e0296a5cebe92d9db43cc915"} Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.636024 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m9zr5" event={"ID":"a0737ecd-8d57-44e2-ac69-f9fb9f884dba","Type":"ContainerDied","Data":"a053377c2cf3bdb4cff0b7f8b68044693bf95765c6f944faa3966b20cf4c2a2e"} Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.636044 4682 scope.go:117] "RemoveContainer" containerID="b867aa2bb4d246ed01b244e85dcec0ccf4c39dc3e0296a5cebe92d9db43cc915" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.636164 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-m9zr5" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.641426 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qjpnp\" (UniqueName: \"kubernetes.io/projected/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-kube-api-access-qjpnp\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.641455 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.641480 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0737ecd-8d57-44e2-ac69-f9fb9f884dba-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.667545 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-m9zr5"] Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.669910 4682 scope.go:117] "RemoveContainer" containerID="6a84bedf3edefac09161b5bd3e0a81c5a4c07f1098583d7e01108d4ef9d6f235" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.673010 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-m9zr5"] Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.683368 4682 scope.go:117] "RemoveContainer" containerID="685a5bf2a351c7497aa064115d30ed8c0432757030e87819b82cca95aacb06f1" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.708915 4682 scope.go:117] "RemoveContainer" containerID="b867aa2bb4d246ed01b244e85dcec0ccf4c39dc3e0296a5cebe92d9db43cc915" Dec 10 11:00:01 crc kubenswrapper[4682]: E1210 11:00:01.709440 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b867aa2bb4d246ed01b244e85dcec0ccf4c39dc3e0296a5cebe92d9db43cc915\": container with ID starting with b867aa2bb4d246ed01b244e85dcec0ccf4c39dc3e0296a5cebe92d9db43cc915 not found: ID does not exist" containerID="b867aa2bb4d246ed01b244e85dcec0ccf4c39dc3e0296a5cebe92d9db43cc915" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.709608 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b867aa2bb4d246ed01b244e85dcec0ccf4c39dc3e0296a5cebe92d9db43cc915"} err="failed to get container status \"b867aa2bb4d246ed01b244e85dcec0ccf4c39dc3e0296a5cebe92d9db43cc915\": rpc error: code = NotFound desc = could not find container \"b867aa2bb4d246ed01b244e85dcec0ccf4c39dc3e0296a5cebe92d9db43cc915\": container with ID starting with b867aa2bb4d246ed01b244e85dcec0ccf4c39dc3e0296a5cebe92d9db43cc915 not found: ID does not exist" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.709643 4682 scope.go:117] "RemoveContainer" containerID="6a84bedf3edefac09161b5bd3e0a81c5a4c07f1098583d7e01108d4ef9d6f235" Dec 10 11:00:01 crc kubenswrapper[4682]: E1210 11:00:01.710012 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a84bedf3edefac09161b5bd3e0a81c5a4c07f1098583d7e01108d4ef9d6f235\": container with ID starting with 6a84bedf3edefac09161b5bd3e0a81c5a4c07f1098583d7e01108d4ef9d6f235 not found: ID does not exist" containerID="6a84bedf3edefac09161b5bd3e0a81c5a4c07f1098583d7e01108d4ef9d6f235" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.710042 4682 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a84bedf3edefac09161b5bd3e0a81c5a4c07f1098583d7e01108d4ef9d6f235"} err="failed to get container status \"6a84bedf3edefac09161b5bd3e0a81c5a4c07f1098583d7e01108d4ef9d6f235\": rpc error: code = NotFound desc = could not find container \"6a84bedf3edefac09161b5bd3e0a81c5a4c07f1098583d7e01108d4ef9d6f235\": container with ID starting with 6a84bedf3edefac09161b5bd3e0a81c5a4c07f1098583d7e01108d4ef9d6f235 not found: ID does not exist" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.710060 4682 scope.go:117] "RemoveContainer" containerID="685a5bf2a351c7497aa064115d30ed8c0432757030e87819b82cca95aacb06f1" Dec 10 11:00:01 crc kubenswrapper[4682]: E1210 11:00:01.710308 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"685a5bf2a351c7497aa064115d30ed8c0432757030e87819b82cca95aacb06f1\": container with ID starting with 685a5bf2a351c7497aa064115d30ed8c0432757030e87819b82cca95aacb06f1 not found: ID does not exist" containerID="685a5bf2a351c7497aa064115d30ed8c0432757030e87819b82cca95aacb06f1" Dec 10 11:00:01 crc kubenswrapper[4682]: I1210 11:00:01.710335 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"685a5bf2a351c7497aa064115d30ed8c0432757030e87819b82cca95aacb06f1"} err="failed to get container status \"685a5bf2a351c7497aa064115d30ed8c0432757030e87819b82cca95aacb06f1\": rpc error: code = NotFound desc = could not find container \"685a5bf2a351c7497aa064115d30ed8c0432757030e87819b82cca95aacb06f1\": container with ID starting with 685a5bf2a351c7497aa064115d30ed8c0432757030e87819b82cca95aacb06f1 not found: ID does not exist" Dec 10 11:00:02 crc kubenswrapper[4682]: I1210 11:00:02.392189 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0737ecd-8d57-44e2-ac69-f9fb9f884dba" path="/var/lib/kubelet/pods/a0737ecd-8d57-44e2-ac69-f9fb9f884dba/volumes" Dec 10 11:00:02 crc kubenswrapper[4682]: I1210 11:00:02.873833 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" Dec 10 11:00:03 crc kubenswrapper[4682]: I1210 11:00:03.057601 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-secret-volume\") pod \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\" (UID: \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\") " Dec 10 11:00:03 crc kubenswrapper[4682]: I1210 11:00:03.057698 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-config-volume\") pod \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\" (UID: \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\") " Dec 10 11:00:03 crc kubenswrapper[4682]: I1210 11:00:03.057771 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rw9d8\" (UniqueName: \"kubernetes.io/projected/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-kube-api-access-rw9d8\") pod \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\" (UID: \"edb14eb0-e25a-46c3-b8da-601fd04ad0a1\") " Dec 10 11:00:03 crc kubenswrapper[4682]: I1210 11:00:03.058340 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-config-volume" (OuterVolumeSpecName: "config-volume") pod "edb14eb0-e25a-46c3-b8da-601fd04ad0a1" (UID: "edb14eb0-e25a-46c3-b8da-601fd04ad0a1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:00:03 crc kubenswrapper[4682]: I1210 11:00:03.062359 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-kube-api-access-rw9d8" (OuterVolumeSpecName: "kube-api-access-rw9d8") pod "edb14eb0-e25a-46c3-b8da-601fd04ad0a1" (UID: "edb14eb0-e25a-46c3-b8da-601fd04ad0a1"). InnerVolumeSpecName "kube-api-access-rw9d8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:00:03 crc kubenswrapper[4682]: I1210 11:00:03.062707 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "edb14eb0-e25a-46c3-b8da-601fd04ad0a1" (UID: "edb14eb0-e25a-46c3-b8da-601fd04ad0a1"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:00:03 crc kubenswrapper[4682]: I1210 11:00:03.159328 4682 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:03 crc kubenswrapper[4682]: I1210 11:00:03.159370 4682 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:03 crc kubenswrapper[4682]: I1210 11:00:03.159379 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rw9d8\" (UniqueName: \"kubernetes.io/projected/edb14eb0-e25a-46c3-b8da-601fd04ad0a1-kube-api-access-rw9d8\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:03 crc kubenswrapper[4682]: I1210 11:00:03.653092 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" event={"ID":"edb14eb0-e25a-46c3-b8da-601fd04ad0a1","Type":"ContainerDied","Data":"a53f3b0e0b9b39038ba50c78bf1ce131a552d305764b6d13d1f5c26ca55a3f5c"} Dec 10 11:00:03 crc kubenswrapper[4682]: I1210 11:00:03.653511 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a53f3b0e0b9b39038ba50c78bf1ce131a552d305764b6d13d1f5c26ca55a3f5c" Dec 10 11:00:03 crc kubenswrapper[4682]: I1210 11:00:03.653173 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.036140 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-phpng"] Dec 10 11:00:07 crc kubenswrapper[4682]: E1210 11:00:07.037042 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0737ecd-8d57-44e2-ac69-f9fb9f884dba" containerName="registry-server" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.037063 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0737ecd-8d57-44e2-ac69-f9fb9f884dba" containerName="registry-server" Dec 10 11:00:07 crc kubenswrapper[4682]: E1210 11:00:07.037078 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0737ecd-8d57-44e2-ac69-f9fb9f884dba" containerName="extract-utilities" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.037087 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0737ecd-8d57-44e2-ac69-f9fb9f884dba" containerName="extract-utilities" Dec 10 11:00:07 crc kubenswrapper[4682]: E1210 11:00:07.037109 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0737ecd-8d57-44e2-ac69-f9fb9f884dba" containerName="extract-content" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.037118 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0737ecd-8d57-44e2-ac69-f9fb9f884dba" containerName="extract-content" Dec 10 11:00:07 crc kubenswrapper[4682]: E1210 11:00:07.037135 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edb14eb0-e25a-46c3-b8da-601fd04ad0a1" containerName="collect-profiles" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.037143 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="edb14eb0-e25a-46c3-b8da-601fd04ad0a1" containerName="collect-profiles" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.037278 4682 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="a0737ecd-8d57-44e2-ac69-f9fb9f884dba" containerName="registry-server" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.037291 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="edb14eb0-e25a-46c3-b8da-601fd04ad0a1" containerName="collect-profiles" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.038302 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.050422 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-phpng"] Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.129919 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a488ea9-1fff-4f5a-9956-ec555529cea5-utilities\") pod \"certified-operators-phpng\" (UID: \"4a488ea9-1fff-4f5a-9956-ec555529cea5\") " pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.130206 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n866s\" (UniqueName: \"kubernetes.io/projected/4a488ea9-1fff-4f5a-9956-ec555529cea5-kube-api-access-n866s\") pod \"certified-operators-phpng\" (UID: \"4a488ea9-1fff-4f5a-9956-ec555529cea5\") " pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.130320 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a488ea9-1fff-4f5a-9956-ec555529cea5-catalog-content\") pod \"certified-operators-phpng\" (UID: \"4a488ea9-1fff-4f5a-9956-ec555529cea5\") " pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.231993 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a488ea9-1fff-4f5a-9956-ec555529cea5-utilities\") pod \"certified-operators-phpng\" (UID: \"4a488ea9-1fff-4f5a-9956-ec555529cea5\") " pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.232136 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n866s\" (UniqueName: \"kubernetes.io/projected/4a488ea9-1fff-4f5a-9956-ec555529cea5-kube-api-access-n866s\") pod \"certified-operators-phpng\" (UID: \"4a488ea9-1fff-4f5a-9956-ec555529cea5\") " pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.232283 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a488ea9-1fff-4f5a-9956-ec555529cea5-catalog-content\") pod \"certified-operators-phpng\" (UID: \"4a488ea9-1fff-4f5a-9956-ec555529cea5\") " pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.232493 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a488ea9-1fff-4f5a-9956-ec555529cea5-utilities\") pod \"certified-operators-phpng\" (UID: \"4a488ea9-1fff-4f5a-9956-ec555529cea5\") " pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.232703 4682 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a488ea9-1fff-4f5a-9956-ec555529cea5-catalog-content\") pod \"certified-operators-phpng\" (UID: \"4a488ea9-1fff-4f5a-9956-ec555529cea5\") " pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.256587 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n866s\" (UniqueName: \"kubernetes.io/projected/4a488ea9-1fff-4f5a-9956-ec555529cea5-kube-api-access-n866s\") pod \"certified-operators-phpng\" (UID: \"4a488ea9-1fff-4f5a-9956-ec555529cea5\") " pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.366139 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:07 crc kubenswrapper[4682]: I1210 11:00:07.670343 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-phpng"] Dec 10 11:00:07 crc kubenswrapper[4682]: W1210 11:00:07.683625 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a488ea9_1fff_4f5a_9956_ec555529cea5.slice/crio-e52a3fa4966764c4d8edd90d2dec89ab7916ceeadb95a15dd4ecdde82d6c0a40 WatchSource:0}: Error finding container e52a3fa4966764c4d8edd90d2dec89ab7916ceeadb95a15dd4ecdde82d6c0a40: Status 404 returned error can't find the container with id e52a3fa4966764c4d8edd90d2dec89ab7916ceeadb95a15dd4ecdde82d6c0a40 Dec 10 11:00:08 crc kubenswrapper[4682]: I1210 11:00:08.680955 4682 generic.go:334] "Generic (PLEG): container finished" podID="4a488ea9-1fff-4f5a-9956-ec555529cea5" containerID="839e75fe73aa05aaa65773b3af28bb3e55ad6c25c379119d1057b1726b5a70ab" exitCode=0 Dec 10 11:00:08 crc kubenswrapper[4682]: I1210 11:00:08.681096 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-phpng" event={"ID":"4a488ea9-1fff-4f5a-9956-ec555529cea5","Type":"ContainerDied","Data":"839e75fe73aa05aaa65773b3af28bb3e55ad6c25c379119d1057b1726b5a70ab"} Dec 10 11:00:08 crc kubenswrapper[4682]: I1210 11:00:08.681297 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-phpng" event={"ID":"4a488ea9-1fff-4f5a-9956-ec555529cea5","Type":"ContainerStarted","Data":"e52a3fa4966764c4d8edd90d2dec89ab7916ceeadb95a15dd4ecdde82d6c0a40"} Dec 10 11:00:09 crc kubenswrapper[4682]: I1210 11:00:09.688509 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-phpng" event={"ID":"4a488ea9-1fff-4f5a-9956-ec555529cea5","Type":"ContainerStarted","Data":"3564761c63f61229bf6e0da616d97de8311dffac4dc591ca9181fcab0a50e892"} Dec 10 11:00:10 crc kubenswrapper[4682]: I1210 11:00:10.695624 4682 generic.go:334] "Generic (PLEG): container finished" podID="4a488ea9-1fff-4f5a-9956-ec555529cea5" containerID="3564761c63f61229bf6e0da616d97de8311dffac4dc591ca9181fcab0a50e892" exitCode=0 Dec 10 11:00:10 crc kubenswrapper[4682]: I1210 11:00:10.695717 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-phpng" event={"ID":"4a488ea9-1fff-4f5a-9956-ec555529cea5","Type":"ContainerDied","Data":"3564761c63f61229bf6e0da616d97de8311dffac4dc591ca9181fcab0a50e892"} Dec 10 11:00:11 crc kubenswrapper[4682]: I1210 11:00:11.703137 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-phpng" event={"ID":"4a488ea9-1fff-4f5a-9956-ec555529cea5","Type":"ContainerStarted","Data":"4d169200a555614607615fb3230b1bc64b2a276eb80bf813f8c6c6ed4d262dd6"} Dec 10 11:00:11 crc kubenswrapper[4682]: I1210 11:00:11.724096 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-phpng" podStartSLOduration=2.008793361 podStartE2EDuration="4.724082397s" podCreationTimestamp="2025-12-10 11:00:07 +0000 UTC" firstStartedPulling="2025-12-10 11:00:08.682431727 +0000 UTC m=+889.002642477" lastFinishedPulling="2025-12-10 11:00:11.397720763 +0000 UTC m=+891.717931513" observedRunningTime="2025-12-10 11:00:11.721482734 +0000 UTC m=+892.041693494" watchObservedRunningTime="2025-12-10 11:00:11.724082397 +0000 UTC m=+892.044293147" Dec 10 11:00:14 crc kubenswrapper[4682]: I1210 11:00:14.886807 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv"] Dec 10 11:00:14 crc kubenswrapper[4682]: I1210 11:00:14.888557 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" Dec 10 11:00:14 crc kubenswrapper[4682]: I1210 11:00:14.890451 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 10 11:00:14 crc kubenswrapper[4682]: I1210 11:00:14.894773 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv"] Dec 10 11:00:14 crc kubenswrapper[4682]: I1210 11:00:14.957488 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5k9b\" (UniqueName: \"kubernetes.io/projected/7601d99a-9766-47a0-931d-b42823276eeb-kube-api-access-w5k9b\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv\" (UID: \"7601d99a-9766-47a0-931d-b42823276eeb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" Dec 10 11:00:14 crc kubenswrapper[4682]: I1210 11:00:14.957585 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7601d99a-9766-47a0-931d-b42823276eeb-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv\" (UID: \"7601d99a-9766-47a0-931d-b42823276eeb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" Dec 10 11:00:14 crc kubenswrapper[4682]: I1210 11:00:14.957608 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7601d99a-9766-47a0-931d-b42823276eeb-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv\" (UID: \"7601d99a-9766-47a0-931d-b42823276eeb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" Dec 10 11:00:15 crc kubenswrapper[4682]: I1210 11:00:15.059040 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7601d99a-9766-47a0-931d-b42823276eeb-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv\" (UID: \"7601d99a-9766-47a0-931d-b42823276eeb\") " 
pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" Dec 10 11:00:15 crc kubenswrapper[4682]: I1210 11:00:15.059091 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7601d99a-9766-47a0-931d-b42823276eeb-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv\" (UID: \"7601d99a-9766-47a0-931d-b42823276eeb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" Dec 10 11:00:15 crc kubenswrapper[4682]: I1210 11:00:15.059198 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5k9b\" (UniqueName: \"kubernetes.io/projected/7601d99a-9766-47a0-931d-b42823276eeb-kube-api-access-w5k9b\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv\" (UID: \"7601d99a-9766-47a0-931d-b42823276eeb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" Dec 10 11:00:15 crc kubenswrapper[4682]: I1210 11:00:15.059789 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7601d99a-9766-47a0-931d-b42823276eeb-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv\" (UID: \"7601d99a-9766-47a0-931d-b42823276eeb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" Dec 10 11:00:15 crc kubenswrapper[4682]: I1210 11:00:15.059996 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7601d99a-9766-47a0-931d-b42823276eeb-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv\" (UID: \"7601d99a-9766-47a0-931d-b42823276eeb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" Dec 10 11:00:15 crc kubenswrapper[4682]: I1210 11:00:15.112686 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5k9b\" (UniqueName: \"kubernetes.io/projected/7601d99a-9766-47a0-931d-b42823276eeb-kube-api-access-w5k9b\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv\" (UID: \"7601d99a-9766-47a0-931d-b42823276eeb\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" Dec 10 11:00:15 crc kubenswrapper[4682]: I1210 11:00:15.205513 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" Dec 10 11:00:15 crc kubenswrapper[4682]: I1210 11:00:15.392707 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv"] Dec 10 11:00:15 crc kubenswrapper[4682]: I1210 11:00:15.726101 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" event={"ID":"7601d99a-9766-47a0-931d-b42823276eeb","Type":"ContainerStarted","Data":"a1295dbef54c47128d0709399e7299e6ab98301b1f422ee246b95afa29e6eb56"} Dec 10 11:00:16 crc kubenswrapper[4682]: I1210 11:00:16.736421 4682 generic.go:334] "Generic (PLEG): container finished" podID="7601d99a-9766-47a0-931d-b42823276eeb" containerID="2025526bc5bd54a688f74c6e356f820ee49585276ea8122d32d10ac4043049e4" exitCode=0 Dec 10 11:00:16 crc kubenswrapper[4682]: I1210 11:00:16.736538 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" event={"ID":"7601d99a-9766-47a0-931d-b42823276eeb","Type":"ContainerDied","Data":"2025526bc5bd54a688f74c6e356f820ee49585276ea8122d32d10ac4043049e4"} Dec 10 11:00:17 crc kubenswrapper[4682]: I1210 11:00:17.366666 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:17 crc kubenswrapper[4682]: I1210 11:00:17.366742 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:17 crc kubenswrapper[4682]: I1210 11:00:17.406203 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:17 crc kubenswrapper[4682]: I1210 11:00:17.821890 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:19 crc kubenswrapper[4682]: I1210 11:00:19.627911 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-phpng"] Dec 10 11:00:19 crc kubenswrapper[4682]: I1210 11:00:19.756502 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" event={"ID":"7601d99a-9766-47a0-931d-b42823276eeb","Type":"ContainerDied","Data":"d1bb8561ae92cfc545da7c013f089a83351762727c517faf72bbb6bd4a4ec441"} Dec 10 11:00:19 crc kubenswrapper[4682]: I1210 11:00:19.756560 4682 generic.go:334] "Generic (PLEG): container finished" podID="7601d99a-9766-47a0-931d-b42823276eeb" containerID="d1bb8561ae92cfc545da7c013f089a83351762727c517faf72bbb6bd4a4ec441" exitCode=0 Dec 10 11:00:19 crc kubenswrapper[4682]: I1210 11:00:19.757055 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-phpng" podUID="4a488ea9-1fff-4f5a-9956-ec555529cea5" containerName="registry-server" containerID="cri-o://4d169200a555614607615fb3230b1bc64b2a276eb80bf813f8c6c6ed4d262dd6" gracePeriod=2 Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.080552 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.230131 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a488ea9-1fff-4f5a-9956-ec555529cea5-utilities\") pod \"4a488ea9-1fff-4f5a-9956-ec555529cea5\" (UID: \"4a488ea9-1fff-4f5a-9956-ec555529cea5\") " Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.230229 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a488ea9-1fff-4f5a-9956-ec555529cea5-catalog-content\") pod \"4a488ea9-1fff-4f5a-9956-ec555529cea5\" (UID: \"4a488ea9-1fff-4f5a-9956-ec555529cea5\") " Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.230293 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n866s\" (UniqueName: \"kubernetes.io/projected/4a488ea9-1fff-4f5a-9956-ec555529cea5-kube-api-access-n866s\") pod \"4a488ea9-1fff-4f5a-9956-ec555529cea5\" (UID: \"4a488ea9-1fff-4f5a-9956-ec555529cea5\") " Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.231217 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a488ea9-1fff-4f5a-9956-ec555529cea5-utilities" (OuterVolumeSpecName: "utilities") pod "4a488ea9-1fff-4f5a-9956-ec555529cea5" (UID: "4a488ea9-1fff-4f5a-9956-ec555529cea5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.238641 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a488ea9-1fff-4f5a-9956-ec555529cea5-kube-api-access-n866s" (OuterVolumeSpecName: "kube-api-access-n866s") pod "4a488ea9-1fff-4f5a-9956-ec555529cea5" (UID: "4a488ea9-1fff-4f5a-9956-ec555529cea5"). InnerVolumeSpecName "kube-api-access-n866s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.331293 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a488ea9-1fff-4f5a-9956-ec555529cea5-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.331331 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n866s\" (UniqueName: \"kubernetes.io/projected/4a488ea9-1fff-4f5a-9956-ec555529cea5-kube-api-access-n866s\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.405448 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a488ea9-1fff-4f5a-9956-ec555529cea5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4a488ea9-1fff-4f5a-9956-ec555529cea5" (UID: "4a488ea9-1fff-4f5a-9956-ec555529cea5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.432902 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a488ea9-1fff-4f5a-9956-ec555529cea5-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.763385 4682 generic.go:334] "Generic (PLEG): container finished" podID="4a488ea9-1fff-4f5a-9956-ec555529cea5" containerID="4d169200a555614607615fb3230b1bc64b2a276eb80bf813f8c6c6ed4d262dd6" exitCode=0 Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.763435 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-phpng" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.763456 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-phpng" event={"ID":"4a488ea9-1fff-4f5a-9956-ec555529cea5","Type":"ContainerDied","Data":"4d169200a555614607615fb3230b1bc64b2a276eb80bf813f8c6c6ed4d262dd6"} Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.763503 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-phpng" event={"ID":"4a488ea9-1fff-4f5a-9956-ec555529cea5","Type":"ContainerDied","Data":"e52a3fa4966764c4d8edd90d2dec89ab7916ceeadb95a15dd4ecdde82d6c0a40"} Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.763522 4682 scope.go:117] "RemoveContainer" containerID="4d169200a555614607615fb3230b1bc64b2a276eb80bf813f8c6c6ed4d262dd6" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.771392 4682 generic.go:334] "Generic (PLEG): container finished" podID="7601d99a-9766-47a0-931d-b42823276eeb" containerID="4031c709d28e87f30db26941ce04707089bc9f8247782eea4b9db3f38e279d25" exitCode=0 Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.771420 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" event={"ID":"7601d99a-9766-47a0-931d-b42823276eeb","Type":"ContainerDied","Data":"4031c709d28e87f30db26941ce04707089bc9f8247782eea4b9db3f38e279d25"} Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.787806 4682 scope.go:117] "RemoveContainer" containerID="3564761c63f61229bf6e0da616d97de8311dffac4dc591ca9181fcab0a50e892" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.806963 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-phpng"] Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.813787 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-phpng"] Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.818149 4682 scope.go:117] "RemoveContainer" containerID="839e75fe73aa05aaa65773b3af28bb3e55ad6c25c379119d1057b1726b5a70ab" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.836205 4682 scope.go:117] "RemoveContainer" containerID="4d169200a555614607615fb3230b1bc64b2a276eb80bf813f8c6c6ed4d262dd6" Dec 10 11:00:20 crc kubenswrapper[4682]: E1210 11:00:20.836823 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d169200a555614607615fb3230b1bc64b2a276eb80bf813f8c6c6ed4d262dd6\": container with ID starting with 4d169200a555614607615fb3230b1bc64b2a276eb80bf813f8c6c6ed4d262dd6 not found: ID does not exist" 
containerID="4d169200a555614607615fb3230b1bc64b2a276eb80bf813f8c6c6ed4d262dd6" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.836870 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d169200a555614607615fb3230b1bc64b2a276eb80bf813f8c6c6ed4d262dd6"} err="failed to get container status \"4d169200a555614607615fb3230b1bc64b2a276eb80bf813f8c6c6ed4d262dd6\": rpc error: code = NotFound desc = could not find container \"4d169200a555614607615fb3230b1bc64b2a276eb80bf813f8c6c6ed4d262dd6\": container with ID starting with 4d169200a555614607615fb3230b1bc64b2a276eb80bf813f8c6c6ed4d262dd6 not found: ID does not exist" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.836891 4682 scope.go:117] "RemoveContainer" containerID="3564761c63f61229bf6e0da616d97de8311dffac4dc591ca9181fcab0a50e892" Dec 10 11:00:20 crc kubenswrapper[4682]: E1210 11:00:20.837106 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3564761c63f61229bf6e0da616d97de8311dffac4dc591ca9181fcab0a50e892\": container with ID starting with 3564761c63f61229bf6e0da616d97de8311dffac4dc591ca9181fcab0a50e892 not found: ID does not exist" containerID="3564761c63f61229bf6e0da616d97de8311dffac4dc591ca9181fcab0a50e892" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.837189 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3564761c63f61229bf6e0da616d97de8311dffac4dc591ca9181fcab0a50e892"} err="failed to get container status \"3564761c63f61229bf6e0da616d97de8311dffac4dc591ca9181fcab0a50e892\": rpc error: code = NotFound desc = could not find container \"3564761c63f61229bf6e0da616d97de8311dffac4dc591ca9181fcab0a50e892\": container with ID starting with 3564761c63f61229bf6e0da616d97de8311dffac4dc591ca9181fcab0a50e892 not found: ID does not exist" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.837255 4682 scope.go:117] "RemoveContainer" containerID="839e75fe73aa05aaa65773b3af28bb3e55ad6c25c379119d1057b1726b5a70ab" Dec 10 11:00:20 crc kubenswrapper[4682]: E1210 11:00:20.837593 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"839e75fe73aa05aaa65773b3af28bb3e55ad6c25c379119d1057b1726b5a70ab\": container with ID starting with 839e75fe73aa05aaa65773b3af28bb3e55ad6c25c379119d1057b1726b5a70ab not found: ID does not exist" containerID="839e75fe73aa05aaa65773b3af28bb3e55ad6c25c379119d1057b1726b5a70ab" Dec 10 11:00:20 crc kubenswrapper[4682]: I1210 11:00:20.837617 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"839e75fe73aa05aaa65773b3af28bb3e55ad6c25c379119d1057b1726b5a70ab"} err="failed to get container status \"839e75fe73aa05aaa65773b3af28bb3e55ad6c25c379119d1057b1726b5a70ab\": rpc error: code = NotFound desc = could not find container \"839e75fe73aa05aaa65773b3af28bb3e55ad6c25c379119d1057b1726b5a70ab\": container with ID starting with 839e75fe73aa05aaa65773b3af28bb3e55ad6c25c379119d1057b1726b5a70ab not found: ID does not exist" Dec 10 11:00:21 crc kubenswrapper[4682]: I1210 11:00:21.985950 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" Dec 10 11:00:22 crc kubenswrapper[4682]: I1210 11:00:22.052303 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7601d99a-9766-47a0-931d-b42823276eeb-bundle\") pod \"7601d99a-9766-47a0-931d-b42823276eeb\" (UID: \"7601d99a-9766-47a0-931d-b42823276eeb\") " Dec 10 11:00:22 crc kubenswrapper[4682]: I1210 11:00:22.052404 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5k9b\" (UniqueName: \"kubernetes.io/projected/7601d99a-9766-47a0-931d-b42823276eeb-kube-api-access-w5k9b\") pod \"7601d99a-9766-47a0-931d-b42823276eeb\" (UID: \"7601d99a-9766-47a0-931d-b42823276eeb\") " Dec 10 11:00:22 crc kubenswrapper[4682]: I1210 11:00:22.052525 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7601d99a-9766-47a0-931d-b42823276eeb-util\") pod \"7601d99a-9766-47a0-931d-b42823276eeb\" (UID: \"7601d99a-9766-47a0-931d-b42823276eeb\") " Dec 10 11:00:22 crc kubenswrapper[4682]: I1210 11:00:22.053591 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7601d99a-9766-47a0-931d-b42823276eeb-bundle" (OuterVolumeSpecName: "bundle") pod "7601d99a-9766-47a0-931d-b42823276eeb" (UID: "7601d99a-9766-47a0-931d-b42823276eeb"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:00:22 crc kubenswrapper[4682]: I1210 11:00:22.058111 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7601d99a-9766-47a0-931d-b42823276eeb-kube-api-access-w5k9b" (OuterVolumeSpecName: "kube-api-access-w5k9b") pod "7601d99a-9766-47a0-931d-b42823276eeb" (UID: "7601d99a-9766-47a0-931d-b42823276eeb"). InnerVolumeSpecName "kube-api-access-w5k9b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:00:22 crc kubenswrapper[4682]: I1210 11:00:22.065720 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7601d99a-9766-47a0-931d-b42823276eeb-util" (OuterVolumeSpecName: "util") pod "7601d99a-9766-47a0-931d-b42823276eeb" (UID: "7601d99a-9766-47a0-931d-b42823276eeb"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:00:22 crc kubenswrapper[4682]: I1210 11:00:22.154933 4682 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7601d99a-9766-47a0-931d-b42823276eeb-util\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:22 crc kubenswrapper[4682]: I1210 11:00:22.155248 4682 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7601d99a-9766-47a0-931d-b42823276eeb-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:22 crc kubenswrapper[4682]: I1210 11:00:22.155262 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5k9b\" (UniqueName: \"kubernetes.io/projected/7601d99a-9766-47a0-931d-b42823276eeb-kube-api-access-w5k9b\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:22 crc kubenswrapper[4682]: I1210 11:00:22.389365 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a488ea9-1fff-4f5a-9956-ec555529cea5" path="/var/lib/kubelet/pods/4a488ea9-1fff-4f5a-9956-ec555529cea5/volumes" Dec 10 11:00:22 crc kubenswrapper[4682]: I1210 11:00:22.786701 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" event={"ID":"7601d99a-9766-47a0-931d-b42823276eeb","Type":"ContainerDied","Data":"a1295dbef54c47128d0709399e7299e6ab98301b1f422ee246b95afa29e6eb56"} Dec 10 11:00:22 crc kubenswrapper[4682]: I1210 11:00:22.786739 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1295dbef54c47128d0709399e7299e6ab98301b1f422ee246b95afa29e6eb56" Dec 10 11:00:22 crc kubenswrapper[4682]: I1210 11:00:22.786789 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.199062 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-gng7d"] Dec 10 11:00:26 crc kubenswrapper[4682]: E1210 11:00:26.199671 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a488ea9-1fff-4f5a-9956-ec555529cea5" containerName="extract-utilities" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.199689 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a488ea9-1fff-4f5a-9956-ec555529cea5" containerName="extract-utilities" Dec 10 11:00:26 crc kubenswrapper[4682]: E1210 11:00:26.199707 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a488ea9-1fff-4f5a-9956-ec555529cea5" containerName="registry-server" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.199716 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a488ea9-1fff-4f5a-9956-ec555529cea5" containerName="registry-server" Dec 10 11:00:26 crc kubenswrapper[4682]: E1210 11:00:26.199732 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7601d99a-9766-47a0-931d-b42823276eeb" containerName="pull" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.199740 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="7601d99a-9766-47a0-931d-b42823276eeb" containerName="pull" Dec 10 11:00:26 crc kubenswrapper[4682]: E1210 11:00:26.199754 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a488ea9-1fff-4f5a-9956-ec555529cea5" containerName="extract-content" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.199761 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a488ea9-1fff-4f5a-9956-ec555529cea5" containerName="extract-content" Dec 10 11:00:26 crc kubenswrapper[4682]: E1210 11:00:26.199773 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7601d99a-9766-47a0-931d-b42823276eeb" containerName="util" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.199780 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="7601d99a-9766-47a0-931d-b42823276eeb" containerName="util" Dec 10 11:00:26 crc kubenswrapper[4682]: E1210 11:00:26.199791 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7601d99a-9766-47a0-931d-b42823276eeb" containerName="extract" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.199798 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="7601d99a-9766-47a0-931d-b42823276eeb" containerName="extract" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.199910 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="7601d99a-9766-47a0-931d-b42823276eeb" containerName="extract" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.199933 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a488ea9-1fff-4f5a-9956-ec555529cea5" containerName="registry-server" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.200445 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-gng7d" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.203683 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-bpbzc" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.207540 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.208575 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.213812 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-gng7d"] Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.308412 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5rpz\" (UniqueName: \"kubernetes.io/projected/8045c75b-04d3-4ffc-a268-3bcce0b6a747-kube-api-access-m5rpz\") pod \"nmstate-operator-5b5b58f5c8-gng7d\" (UID: \"8045c75b-04d3-4ffc-a268-3bcce0b6a747\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-gng7d" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.409683 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5rpz\" (UniqueName: \"kubernetes.io/projected/8045c75b-04d3-4ffc-a268-3bcce0b6a747-kube-api-access-m5rpz\") pod \"nmstate-operator-5b5b58f5c8-gng7d\" (UID: \"8045c75b-04d3-4ffc-a268-3bcce0b6a747\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-gng7d" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.435157 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5rpz\" (UniqueName: \"kubernetes.io/projected/8045c75b-04d3-4ffc-a268-3bcce0b6a747-kube-api-access-m5rpz\") pod \"nmstate-operator-5b5b58f5c8-gng7d\" (UID: \"8045c75b-04d3-4ffc-a268-3bcce0b6a747\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-gng7d" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.525273 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-gng7d" Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.761502 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-gng7d"] Dec 10 11:00:26 crc kubenswrapper[4682]: I1210 11:00:26.811851 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-gng7d" event={"ID":"8045c75b-04d3-4ffc-a268-3bcce0b6a747","Type":"ContainerStarted","Data":"a743d3f3489235ee84db3a6dce75c0e1f2bbc2693adfc4f324a4a4699f2c6f05"} Dec 10 11:00:27 crc kubenswrapper[4682]: I1210 11:00:27.837079 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9bcc2"] Dec 10 11:00:27 crc kubenswrapper[4682]: I1210 11:00:27.838389 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:27 crc kubenswrapper[4682]: I1210 11:00:27.852670 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9bcc2"] Dec 10 11:00:27 crc kubenswrapper[4682]: I1210 11:00:27.927592 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-catalog-content\") pod \"redhat-marketplace-9bcc2\" (UID: \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\") " pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:27 crc kubenswrapper[4682]: I1210 11:00:27.927860 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-utilities\") pod \"redhat-marketplace-9bcc2\" (UID: \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\") " pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:27 crc kubenswrapper[4682]: I1210 11:00:27.927884 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cfxs\" (UniqueName: \"kubernetes.io/projected/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-kube-api-access-9cfxs\") pod \"redhat-marketplace-9bcc2\" (UID: \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\") " pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:28 crc kubenswrapper[4682]: I1210 11:00:28.029434 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-catalog-content\") pod \"redhat-marketplace-9bcc2\" (UID: \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\") " pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:28 crc kubenswrapper[4682]: I1210 11:00:28.029529 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-utilities\") pod \"redhat-marketplace-9bcc2\" (UID: \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\") " pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:28 crc kubenswrapper[4682]: I1210 11:00:28.029558 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cfxs\" (UniqueName: \"kubernetes.io/projected/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-kube-api-access-9cfxs\") pod \"redhat-marketplace-9bcc2\" (UID: \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\") " pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:28 crc kubenswrapper[4682]: I1210 11:00:28.030081 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-utilities\") pod \"redhat-marketplace-9bcc2\" (UID: \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\") " pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:28 crc kubenswrapper[4682]: I1210 11:00:28.030345 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-catalog-content\") pod \"redhat-marketplace-9bcc2\" (UID: \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\") " pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:28 crc kubenswrapper[4682]: I1210 11:00:28.066425 4682 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-9cfxs\" (UniqueName: \"kubernetes.io/projected/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-kube-api-access-9cfxs\") pod \"redhat-marketplace-9bcc2\" (UID: \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\") " pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:28 crc kubenswrapper[4682]: I1210 11:00:28.156628 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:28 crc kubenswrapper[4682]: I1210 11:00:28.415540 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9bcc2"] Dec 10 11:00:28 crc kubenswrapper[4682]: I1210 11:00:28.825742 4682 generic.go:334] "Generic (PLEG): container finished" podID="73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" containerID="bed553dd66925984bb46fa18725c60fb60faa345b8674741c6d76303f40ca127" exitCode=0 Dec 10 11:00:28 crc kubenswrapper[4682]: I1210 11:00:28.825804 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9bcc2" event={"ID":"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f","Type":"ContainerDied","Data":"bed553dd66925984bb46fa18725c60fb60faa345b8674741c6d76303f40ca127"} Dec 10 11:00:28 crc kubenswrapper[4682]: I1210 11:00:28.825860 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9bcc2" event={"ID":"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f","Type":"ContainerStarted","Data":"0ffb94fabab5f7320f65b4cd7fea67387fd5a0acbab5790dd0bc0be253884f0c"} Dec 10 11:00:30 crc kubenswrapper[4682]: I1210 11:00:30.839486 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9bcc2" event={"ID":"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f","Type":"ContainerStarted","Data":"a1f78fdfdcc0a2e7b4e0e1dafad44aba54dee117f8feeeef758ec49180cc275f"} Dec 10 11:00:30 crc kubenswrapper[4682]: I1210 11:00:30.840598 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-gng7d" event={"ID":"8045c75b-04d3-4ffc-a268-3bcce0b6a747","Type":"ContainerStarted","Data":"8c7089a65695e808854253f9924d02f948ef0becc44c8370c785340b8e6c08e4"} Dec 10 11:00:30 crc kubenswrapper[4682]: I1210 11:00:30.879151 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-gng7d" podStartSLOduration=1.090620947 podStartE2EDuration="4.879134287s" podCreationTimestamp="2025-12-10 11:00:26 +0000 UTC" firstStartedPulling="2025-12-10 11:00:26.769151146 +0000 UTC m=+907.089361896" lastFinishedPulling="2025-12-10 11:00:30.557664486 +0000 UTC m=+910.877875236" observedRunningTime="2025-12-10 11:00:30.875583025 +0000 UTC m=+911.195793795" watchObservedRunningTime="2025-12-10 11:00:30.879134287 +0000 UTC m=+911.199345037" Dec 10 11:00:31 crc kubenswrapper[4682]: I1210 11:00:31.849301 4682 generic.go:334] "Generic (PLEG): container finished" podID="73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" containerID="a1f78fdfdcc0a2e7b4e0e1dafad44aba54dee117f8feeeef758ec49180cc275f" exitCode=0 Dec 10 11:00:31 crc kubenswrapper[4682]: I1210 11:00:31.849413 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9bcc2" event={"ID":"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f","Type":"ContainerDied","Data":"a1f78fdfdcc0a2e7b4e0e1dafad44aba54dee117f8feeeef758ec49180cc275f"} Dec 10 11:00:32 crc kubenswrapper[4682]: I1210 11:00:32.857806 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-9bcc2" event={"ID":"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f","Type":"ContainerStarted","Data":"e2a476926299262bc5c27524fac1c12b0272d8c617d95f0384dd6a7bea3370b8"} Dec 10 11:00:32 crc kubenswrapper[4682]: I1210 11:00:32.876241 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9bcc2" podStartSLOduration=2.319223168 podStartE2EDuration="5.876221998s" podCreationTimestamp="2025-12-10 11:00:27 +0000 UTC" firstStartedPulling="2025-12-10 11:00:28.827132634 +0000 UTC m=+909.147343384" lastFinishedPulling="2025-12-10 11:00:32.384131464 +0000 UTC m=+912.704342214" observedRunningTime="2025-12-10 11:00:32.875448553 +0000 UTC m=+913.195659313" watchObservedRunningTime="2025-12-10 11:00:32.876221998 +0000 UTC m=+913.196432748" Dec 10 11:00:35 crc kubenswrapper[4682]: I1210 11:00:35.904906 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-zb5sb"] Dec 10 11:00:35 crc kubenswrapper[4682]: I1210 11:00:35.906255 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zb5sb" Dec 10 11:00:35 crc kubenswrapper[4682]: I1210 11:00:35.909491 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-dvwjv" Dec 10 11:00:35 crc kubenswrapper[4682]: I1210 11:00:35.914768 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc"] Dec 10 11:00:35 crc kubenswrapper[4682]: I1210 11:00:35.915640 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc" Dec 10 11:00:35 crc kubenswrapper[4682]: I1210 11:00:35.917346 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Dec 10 11:00:35 crc kubenswrapper[4682]: I1210 11:00:35.932248 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-zb5sb"] Dec 10 11:00:35 crc kubenswrapper[4682]: I1210 11:00:35.947674 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-bk4zp"] Dec 10 11:00:35 crc kubenswrapper[4682]: I1210 11:00:35.948366 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:35 crc kubenswrapper[4682]: I1210 11:00:35.967916 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc"] Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.035550 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpb4v\" (UniqueName: \"kubernetes.io/projected/17409d60-d1ec-49a2-8c40-a8786491d77b-kube-api-access-dpb4v\") pod \"nmstate-webhook-5f6d4c5ccb-vvtwc\" (UID: \"17409d60-d1ec-49a2-8c40-a8786491d77b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.035612 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpqkp\" (UniqueName: \"kubernetes.io/projected/daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a-kube-api-access-cpqkp\") pod \"nmstate-handler-bk4zp\" (UID: \"daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a\") " pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.035773 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a-dbus-socket\") pod \"nmstate-handler-bk4zp\" (UID: \"daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a\") " pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.035868 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a-nmstate-lock\") pod \"nmstate-handler-bk4zp\" (UID: \"daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a\") " pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.035992 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/17409d60-d1ec-49a2-8c40-a8786491d77b-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-vvtwc\" (UID: \"17409d60-d1ec-49a2-8c40-a8786491d77b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.036070 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2pt9\" (UniqueName: \"kubernetes.io/projected/8ff1d21d-ef0f-421b-bb86-264a367afea9-kube-api-access-h2pt9\") pod \"nmstate-metrics-7f946cbc9-zb5sb\" (UID: \"8ff1d21d-ef0f-421b-bb86-264a367afea9\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zb5sb" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.036100 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a-ovs-socket\") pod \"nmstate-handler-bk4zp\" (UID: \"daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a\") " pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.092778 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9"] Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.093724 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.096814 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-jkdbp" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.097131 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.097262 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.115257 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9"] Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.137602 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhsl5\" (UniqueName: \"kubernetes.io/projected/20cd6a69-1431-4436-b960-a1910bb43824-kube-api-access-fhsl5\") pod \"nmstate-console-plugin-7fbb5f6569-qp2p9\" (UID: \"20cd6a69-1431-4436-b960-a1910bb43824\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.137669 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/17409d60-d1ec-49a2-8c40-a8786491d77b-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-vvtwc\" (UID: \"17409d60-d1ec-49a2-8c40-a8786491d77b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.137723 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2pt9\" (UniqueName: \"kubernetes.io/projected/8ff1d21d-ef0f-421b-bb86-264a367afea9-kube-api-access-h2pt9\") pod \"nmstate-metrics-7f946cbc9-zb5sb\" (UID: \"8ff1d21d-ef0f-421b-bb86-264a367afea9\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zb5sb" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.137749 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a-ovs-socket\") pod \"nmstate-handler-bk4zp\" (UID: \"daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a\") " pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.137785 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/20cd6a69-1431-4436-b960-a1910bb43824-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-qp2p9\" (UID: \"20cd6a69-1431-4436-b960-a1910bb43824\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" Dec 10 11:00:36 crc kubenswrapper[4682]: E1210 11:00:36.137799 4682 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Dec 10 11:00:36 crc kubenswrapper[4682]: E1210 11:00:36.137869 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/17409d60-d1ec-49a2-8c40-a8786491d77b-tls-key-pair podName:17409d60-d1ec-49a2-8c40-a8786491d77b nodeName:}" failed. No retries permitted until 2025-12-10 11:00:36.63785139 +0000 UTC m=+916.958062140 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/17409d60-d1ec-49a2-8c40-a8786491d77b-tls-key-pair") pod "nmstate-webhook-5f6d4c5ccb-vvtwc" (UID: "17409d60-d1ec-49a2-8c40-a8786491d77b") : secret "openshift-nmstate-webhook" not found Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.137812 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpb4v\" (UniqueName: \"kubernetes.io/projected/17409d60-d1ec-49a2-8c40-a8786491d77b-kube-api-access-dpb4v\") pod \"nmstate-webhook-5f6d4c5ccb-vvtwc\" (UID: \"17409d60-d1ec-49a2-8c40-a8786491d77b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.138022 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpqkp\" (UniqueName: \"kubernetes.io/projected/daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a-kube-api-access-cpqkp\") pod \"nmstate-handler-bk4zp\" (UID: \"daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a\") " pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.138064 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a-ovs-socket\") pod \"nmstate-handler-bk4zp\" (UID: \"daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a\") " pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.138070 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a-dbus-socket\") pod \"nmstate-handler-bk4zp\" (UID: \"daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a\") " pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.138111 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/20cd6a69-1431-4436-b960-a1910bb43824-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-qp2p9\" (UID: \"20cd6a69-1431-4436-b960-a1910bb43824\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.138161 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a-nmstate-lock\") pod \"nmstate-handler-bk4zp\" (UID: \"daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a\") " pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.138193 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a-nmstate-lock\") pod \"nmstate-handler-bk4zp\" (UID: \"daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a\") " pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.138369 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a-dbus-socket\") pod \"nmstate-handler-bk4zp\" (UID: \"daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a\") " pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.160179 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-dpb4v\" (UniqueName: \"kubernetes.io/projected/17409d60-d1ec-49a2-8c40-a8786491d77b-kube-api-access-dpb4v\") pod \"nmstate-webhook-5f6d4c5ccb-vvtwc\" (UID: \"17409d60-d1ec-49a2-8c40-a8786491d77b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.175587 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpqkp\" (UniqueName: \"kubernetes.io/projected/daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a-kube-api-access-cpqkp\") pod \"nmstate-handler-bk4zp\" (UID: \"daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a\") " pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.179648 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2pt9\" (UniqueName: \"kubernetes.io/projected/8ff1d21d-ef0f-421b-bb86-264a367afea9-kube-api-access-h2pt9\") pod \"nmstate-metrics-7f946cbc9-zb5sb\" (UID: \"8ff1d21d-ef0f-421b-bb86-264a367afea9\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zb5sb" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.237386 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zb5sb" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.238962 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhsl5\" (UniqueName: \"kubernetes.io/projected/20cd6a69-1431-4436-b960-a1910bb43824-kube-api-access-fhsl5\") pod \"nmstate-console-plugin-7fbb5f6569-qp2p9\" (UID: \"20cd6a69-1431-4436-b960-a1910bb43824\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.239050 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/20cd6a69-1431-4436-b960-a1910bb43824-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-qp2p9\" (UID: \"20cd6a69-1431-4436-b960-a1910bb43824\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.239109 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/20cd6a69-1431-4436-b960-a1910bb43824-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-qp2p9\" (UID: \"20cd6a69-1431-4436-b960-a1910bb43824\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" Dec 10 11:00:36 crc kubenswrapper[4682]: E1210 11:00:36.239233 4682 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Dec 10 11:00:36 crc kubenswrapper[4682]: E1210 11:00:36.239286 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/20cd6a69-1431-4436-b960-a1910bb43824-plugin-serving-cert podName:20cd6a69-1431-4436-b960-a1910bb43824 nodeName:}" failed. No retries permitted until 2025-12-10 11:00:36.739268069 +0000 UTC m=+917.059478819 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/20cd6a69-1431-4436-b960-a1910bb43824-plugin-serving-cert") pod "nmstate-console-plugin-7fbb5f6569-qp2p9" (UID: "20cd6a69-1431-4436-b960-a1910bb43824") : secret "plugin-serving-cert" not found Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.240212 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/20cd6a69-1431-4436-b960-a1910bb43824-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-qp2p9\" (UID: \"20cd6a69-1431-4436-b960-a1910bb43824\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.266677 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.286119 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhsl5\" (UniqueName: \"kubernetes.io/projected/20cd6a69-1431-4436-b960-a1910bb43824-kube-api-access-fhsl5\") pod \"nmstate-console-plugin-7fbb5f6569-qp2p9\" (UID: \"20cd6a69-1431-4436-b960-a1910bb43824\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" Dec 10 11:00:36 crc kubenswrapper[4682]: W1210 11:00:36.290872 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddaa4f7f7_a1b7_4580_a1bd_a23cc5c8805a.slice/crio-5d3357ef3afa24364d4538f1d9a1a6f21d3f838838d0ca273b03cecb6f5f8bf7 WatchSource:0}: Error finding container 5d3357ef3afa24364d4538f1d9a1a6f21d3f838838d0ca273b03cecb6f5f8bf7: Status 404 returned error can't find the container with id 5d3357ef3afa24364d4538f1d9a1a6f21d3f838838d0ca273b03cecb6f5f8bf7 Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.328570 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6d7b884cc6-549jv"] Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.329301 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.340772 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6d7b884cc6-549jv"] Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.446053 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2a93c1af-12b9-4654-92a8-a4f347e71416-console-serving-cert\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.446346 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a93c1af-12b9-4654-92a8-a4f347e71416-trusted-ca-bundle\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.446393 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8bjr\" (UniqueName: \"kubernetes.io/projected/2a93c1af-12b9-4654-92a8-a4f347e71416-kube-api-access-m8bjr\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.446418 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2a93c1af-12b9-4654-92a8-a4f347e71416-console-oauth-config\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.446441 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2a93c1af-12b9-4654-92a8-a4f347e71416-console-config\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.446493 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2a93c1af-12b9-4654-92a8-a4f347e71416-oauth-serving-cert\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.446528 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2a93c1af-12b9-4654-92a8-a4f347e71416-service-ca\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.501585 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-zb5sb"] Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.551343 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/2a93c1af-12b9-4654-92a8-a4f347e71416-console-serving-cert\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.551394 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a93c1af-12b9-4654-92a8-a4f347e71416-trusted-ca-bundle\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.551449 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8bjr\" (UniqueName: \"kubernetes.io/projected/2a93c1af-12b9-4654-92a8-a4f347e71416-kube-api-access-m8bjr\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.551489 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2a93c1af-12b9-4654-92a8-a4f347e71416-console-oauth-config\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.551528 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2a93c1af-12b9-4654-92a8-a4f347e71416-console-config\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.551560 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2a93c1af-12b9-4654-92a8-a4f347e71416-oauth-serving-cert\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.551585 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2a93c1af-12b9-4654-92a8-a4f347e71416-service-ca\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.553172 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2a93c1af-12b9-4654-92a8-a4f347e71416-service-ca\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.553238 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2a93c1af-12b9-4654-92a8-a4f347e71416-oauth-serving-cert\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.553308 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/2a93c1af-12b9-4654-92a8-a4f347e71416-console-config\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.553314 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a93c1af-12b9-4654-92a8-a4f347e71416-trusted-ca-bundle\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.557199 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2a93c1af-12b9-4654-92a8-a4f347e71416-console-serving-cert\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.559003 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2a93c1af-12b9-4654-92a8-a4f347e71416-console-oauth-config\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.567959 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8bjr\" (UniqueName: \"kubernetes.io/projected/2a93c1af-12b9-4654-92a8-a4f347e71416-kube-api-access-m8bjr\") pod \"console-6d7b884cc6-549jv\" (UID: \"2a93c1af-12b9-4654-92a8-a4f347e71416\") " pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.652496 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/17409d60-d1ec-49a2-8c40-a8786491d77b-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-vvtwc\" (UID: \"17409d60-d1ec-49a2-8c40-a8786491d77b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.656871 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/17409d60-d1ec-49a2-8c40-a8786491d77b-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-vvtwc\" (UID: \"17409d60-d1ec-49a2-8c40-a8786491d77b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.663735 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.755588 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/20cd6a69-1431-4436-b960-a1910bb43824-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-qp2p9\" (UID: \"20cd6a69-1431-4436-b960-a1910bb43824\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.761211 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/20cd6a69-1431-4436-b960-a1910bb43824-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-qp2p9\" (UID: \"20cd6a69-1431-4436-b960-a1910bb43824\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.848840 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc" Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.879677 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-bk4zp" event={"ID":"daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a","Type":"ContainerStarted","Data":"5d3357ef3afa24364d4538f1d9a1a6f21d3f838838d0ca273b03cecb6f5f8bf7"} Dec 10 11:00:36 crc kubenswrapper[4682]: I1210 11:00:36.880752 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zb5sb" event={"ID":"8ff1d21d-ef0f-421b-bb86-264a367afea9","Type":"ContainerStarted","Data":"16ee76f6936bacd51f443a31f31b9b0bc6ee202d53e1fd3ff08a1054787b7054"} Dec 10 11:00:37 crc kubenswrapper[4682]: I1210 11:00:37.013755 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" Dec 10 11:00:37 crc kubenswrapper[4682]: I1210 11:00:37.070379 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6d7b884cc6-549jv"] Dec 10 11:00:37 crc kubenswrapper[4682]: W1210 11:00:37.078737 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2a93c1af_12b9_4654_92a8_a4f347e71416.slice/crio-611ba7930ef888fa58400733fbda536006cdd87211230d30cd77f206d6a5f1ac WatchSource:0}: Error finding container 611ba7930ef888fa58400733fbda536006cdd87211230d30cd77f206d6a5f1ac: Status 404 returned error can't find the container with id 611ba7930ef888fa58400733fbda536006cdd87211230d30cd77f206d6a5f1ac Dec 10 11:00:37 crc kubenswrapper[4682]: I1210 11:00:37.246608 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc"] Dec 10 11:00:37 crc kubenswrapper[4682]: I1210 11:00:37.417783 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9"] Dec 10 11:00:37 crc kubenswrapper[4682]: W1210 11:00:37.425908 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod20cd6a69_1431_4436_b960_a1910bb43824.slice/crio-7c104c3318e22c6ac8faf78473c122357e98b982a26df6d72f887b2f36c32064 WatchSource:0}: Error finding container 7c104c3318e22c6ac8faf78473c122357e98b982a26df6d72f887b2f36c32064: Status 404 returned error can't find the container with id 7c104c3318e22c6ac8faf78473c122357e98b982a26df6d72f887b2f36c32064 Dec 10 11:00:37 crc kubenswrapper[4682]: I1210 11:00:37.889643 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6d7b884cc6-549jv" event={"ID":"2a93c1af-12b9-4654-92a8-a4f347e71416","Type":"ContainerStarted","Data":"4be35c5f7a77ce635b716391afcba78806c2cbc6518c7cac916b5ae79fe09c21"} Dec 10 11:00:37 crc kubenswrapper[4682]: I1210 11:00:37.890039 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6d7b884cc6-549jv" event={"ID":"2a93c1af-12b9-4654-92a8-a4f347e71416","Type":"ContainerStarted","Data":"611ba7930ef888fa58400733fbda536006cdd87211230d30cd77f206d6a5f1ac"} Dec 10 11:00:37 crc kubenswrapper[4682]: I1210 11:00:37.890830 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" event={"ID":"20cd6a69-1431-4436-b960-a1910bb43824","Type":"ContainerStarted","Data":"7c104c3318e22c6ac8faf78473c122357e98b982a26df6d72f887b2f36c32064"} Dec 10 11:00:37 crc kubenswrapper[4682]: I1210 11:00:37.892512 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc" event={"ID":"17409d60-d1ec-49a2-8c40-a8786491d77b","Type":"ContainerStarted","Data":"f06647c8f4a3815809ddbb3403a53501d0de0bb7d7d0bf8cb0ad9aeed6731bd1"} Dec 10 11:00:37 crc kubenswrapper[4682]: I1210 11:00:37.915462 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6d7b884cc6-549jv" podStartSLOduration=1.9154408859999998 podStartE2EDuration="1.915440886s" podCreationTimestamp="2025-12-10 11:00:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:00:37.909105836 +0000 UTC m=+918.229316596" watchObservedRunningTime="2025-12-10 11:00:37.915440886 +0000 UTC 
m=+918.235651656" Dec 10 11:00:38 crc kubenswrapper[4682]: I1210 11:00:38.158427 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:38 crc kubenswrapper[4682]: I1210 11:00:38.158502 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:38 crc kubenswrapper[4682]: I1210 11:00:38.204085 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:38 crc kubenswrapper[4682]: I1210 11:00:38.982322 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:39 crc kubenswrapper[4682]: I1210 11:00:39.828882 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9bcc2"] Dec 10 11:00:39 crc kubenswrapper[4682]: I1210 11:00:39.907867 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zb5sb" event={"ID":"8ff1d21d-ef0f-421b-bb86-264a367afea9","Type":"ContainerStarted","Data":"b87b5aa3c3112adc9fd5e2f902ca42a4b0cbbbef03634336b96023427e070112"} Dec 10 11:00:39 crc kubenswrapper[4682]: I1210 11:00:39.909772 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-bk4zp" event={"ID":"daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a","Type":"ContainerStarted","Data":"3109eac0de65458c82158b357447ddeb7419f27e75ca44966ef602b45af1cb8b"} Dec 10 11:00:39 crc kubenswrapper[4682]: I1210 11:00:39.910889 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:39 crc kubenswrapper[4682]: I1210 11:00:39.912373 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc" event={"ID":"17409d60-d1ec-49a2-8c40-a8786491d77b","Type":"ContainerStarted","Data":"7e7a685543b3da7e3cf47beefcabdd9a59b3cf4d9271ec7f99f17b93c903ab56"} Dec 10 11:00:39 crc kubenswrapper[4682]: I1210 11:00:39.912492 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc" Dec 10 11:00:39 crc kubenswrapper[4682]: I1210 11:00:39.934666 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-bk4zp" podStartSLOduration=1.8508852230000001 podStartE2EDuration="4.934647604s" podCreationTimestamp="2025-12-10 11:00:35 +0000 UTC" firstStartedPulling="2025-12-10 11:00:36.296235836 +0000 UTC m=+916.616446586" lastFinishedPulling="2025-12-10 11:00:39.379998217 +0000 UTC m=+919.700208967" observedRunningTime="2025-12-10 11:00:39.928586543 +0000 UTC m=+920.248797323" watchObservedRunningTime="2025-12-10 11:00:39.934647604 +0000 UTC m=+920.254858354" Dec 10 11:00:39 crc kubenswrapper[4682]: I1210 11:00:39.952843 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc" podStartSLOduration=2.759453104 podStartE2EDuration="4.952821297s" podCreationTimestamp="2025-12-10 11:00:35 +0000 UTC" firstStartedPulling="2025-12-10 11:00:37.258567283 +0000 UTC m=+917.578778033" lastFinishedPulling="2025-12-10 11:00:39.451935476 +0000 UTC m=+919.772146226" observedRunningTime="2025-12-10 11:00:39.948248303 +0000 UTC m=+920.268459083" watchObservedRunningTime="2025-12-10 11:00:39.952821297 +0000 UTC m=+920.273032057" Dec 10 
11:00:40 crc kubenswrapper[4682]: I1210 11:00:40.924194 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9bcc2" podUID="73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" containerName="registry-server" containerID="cri-o://e2a476926299262bc5c27524fac1c12b0272d8c617d95f0384dd6a7bea3370b8" gracePeriod=2 Dec 10 11:00:40 crc kubenswrapper[4682]: I1210 11:00:40.923582 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" event={"ID":"20cd6a69-1431-4436-b960-a1910bb43824","Type":"ContainerStarted","Data":"b949c10cd010cad53a77d2469f7d53bf726eeacbb9345151807820249f9df1f1"} Dec 10 11:00:40 crc kubenswrapper[4682]: I1210 11:00:40.947077 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qp2p9" podStartSLOduration=1.859747818 podStartE2EDuration="4.947054661s" podCreationTimestamp="2025-12-10 11:00:36 +0000 UTC" firstStartedPulling="2025-12-10 11:00:37.427075369 +0000 UTC m=+917.747286119" lastFinishedPulling="2025-12-10 11:00:40.514382212 +0000 UTC m=+920.834592962" observedRunningTime="2025-12-10 11:00:40.944064737 +0000 UTC m=+921.264275497" watchObservedRunningTime="2025-12-10 11:00:40.947054661 +0000 UTC m=+921.267265431" Dec 10 11:00:41 crc kubenswrapper[4682]: I1210 11:00:41.867906 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:41 crc kubenswrapper[4682]: I1210 11:00:41.930996 4682 generic.go:334] "Generic (PLEG): container finished" podID="73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" containerID="e2a476926299262bc5c27524fac1c12b0272d8c617d95f0384dd6a7bea3370b8" exitCode=0 Dec 10 11:00:41 crc kubenswrapper[4682]: I1210 11:00:41.931038 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9bcc2" event={"ID":"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f","Type":"ContainerDied","Data":"e2a476926299262bc5c27524fac1c12b0272d8c617d95f0384dd6a7bea3370b8"} Dec 10 11:00:41 crc kubenswrapper[4682]: I1210 11:00:41.931073 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9bcc2" event={"ID":"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f","Type":"ContainerDied","Data":"0ffb94fabab5f7320f65b4cd7fea67387fd5a0acbab5790dd0bc0be253884f0c"} Dec 10 11:00:41 crc kubenswrapper[4682]: I1210 11:00:41.931072 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9bcc2" Dec 10 11:00:41 crc kubenswrapper[4682]: I1210 11:00:41.931086 4682 scope.go:117] "RemoveContainer" containerID="e2a476926299262bc5c27524fac1c12b0272d8c617d95f0384dd6a7bea3370b8" Dec 10 11:00:41 crc kubenswrapper[4682]: I1210 11:00:41.935931 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9cfxs\" (UniqueName: \"kubernetes.io/projected/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-kube-api-access-9cfxs\") pod \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\" (UID: \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\") " Dec 10 11:00:41 crc kubenswrapper[4682]: I1210 11:00:41.936275 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-utilities\") pod \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\" (UID: \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\") " Dec 10 11:00:41 crc kubenswrapper[4682]: I1210 11:00:41.936381 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-catalog-content\") pod \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\" (UID: \"73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f\") " Dec 10 11:00:41 crc kubenswrapper[4682]: I1210 11:00:41.937124 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-utilities" (OuterVolumeSpecName: "utilities") pod "73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" (UID: "73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:00:41 crc kubenswrapper[4682]: I1210 11:00:41.941271 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-kube-api-access-9cfxs" (OuterVolumeSpecName: "kube-api-access-9cfxs") pod "73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" (UID: "73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f"). InnerVolumeSpecName "kube-api-access-9cfxs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:00:41 crc kubenswrapper[4682]: I1210 11:00:41.957196 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" (UID: "73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:00:41 crc kubenswrapper[4682]: I1210 11:00:41.959146 4682 scope.go:117] "RemoveContainer" containerID="a1f78fdfdcc0a2e7b4e0e1dafad44aba54dee117f8feeeef758ec49180cc275f" Dec 10 11:00:41 crc kubenswrapper[4682]: I1210 11:00:41.990929 4682 scope.go:117] "RemoveContainer" containerID="bed553dd66925984bb46fa18725c60fb60faa345b8674741c6d76303f40ca127" Dec 10 11:00:42 crc kubenswrapper[4682]: I1210 11:00:42.029371 4682 scope.go:117] "RemoveContainer" containerID="e2a476926299262bc5c27524fac1c12b0272d8c617d95f0384dd6a7bea3370b8" Dec 10 11:00:42 crc kubenswrapper[4682]: E1210 11:00:42.029833 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2a476926299262bc5c27524fac1c12b0272d8c617d95f0384dd6a7bea3370b8\": container with ID starting with e2a476926299262bc5c27524fac1c12b0272d8c617d95f0384dd6a7bea3370b8 not found: ID does not exist" containerID="e2a476926299262bc5c27524fac1c12b0272d8c617d95f0384dd6a7bea3370b8" Dec 10 11:00:42 crc kubenswrapper[4682]: I1210 11:00:42.029873 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2a476926299262bc5c27524fac1c12b0272d8c617d95f0384dd6a7bea3370b8"} err="failed to get container status \"e2a476926299262bc5c27524fac1c12b0272d8c617d95f0384dd6a7bea3370b8\": rpc error: code = NotFound desc = could not find container \"e2a476926299262bc5c27524fac1c12b0272d8c617d95f0384dd6a7bea3370b8\": container with ID starting with e2a476926299262bc5c27524fac1c12b0272d8c617d95f0384dd6a7bea3370b8 not found: ID does not exist" Dec 10 11:00:42 crc kubenswrapper[4682]: I1210 11:00:42.029899 4682 scope.go:117] "RemoveContainer" containerID="a1f78fdfdcc0a2e7b4e0e1dafad44aba54dee117f8feeeef758ec49180cc275f" Dec 10 11:00:42 crc kubenswrapper[4682]: E1210 11:00:42.030246 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1f78fdfdcc0a2e7b4e0e1dafad44aba54dee117f8feeeef758ec49180cc275f\": container with ID starting with a1f78fdfdcc0a2e7b4e0e1dafad44aba54dee117f8feeeef758ec49180cc275f not found: ID does not exist" containerID="a1f78fdfdcc0a2e7b4e0e1dafad44aba54dee117f8feeeef758ec49180cc275f" Dec 10 11:00:42 crc kubenswrapper[4682]: I1210 11:00:42.030272 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1f78fdfdcc0a2e7b4e0e1dafad44aba54dee117f8feeeef758ec49180cc275f"} err="failed to get container status \"a1f78fdfdcc0a2e7b4e0e1dafad44aba54dee117f8feeeef758ec49180cc275f\": rpc error: code = NotFound desc = could not find container \"a1f78fdfdcc0a2e7b4e0e1dafad44aba54dee117f8feeeef758ec49180cc275f\": container with ID starting with a1f78fdfdcc0a2e7b4e0e1dafad44aba54dee117f8feeeef758ec49180cc275f not found: ID does not exist" Dec 10 11:00:42 crc kubenswrapper[4682]: I1210 11:00:42.030292 4682 scope.go:117] "RemoveContainer" containerID="bed553dd66925984bb46fa18725c60fb60faa345b8674741c6d76303f40ca127" Dec 10 11:00:42 crc kubenswrapper[4682]: E1210 11:00:42.033613 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bed553dd66925984bb46fa18725c60fb60faa345b8674741c6d76303f40ca127\": container with ID starting with bed553dd66925984bb46fa18725c60fb60faa345b8674741c6d76303f40ca127 not found: ID does not exist" containerID="bed553dd66925984bb46fa18725c60fb60faa345b8674741c6d76303f40ca127" Dec 10 11:00:42 crc 
kubenswrapper[4682]: I1210 11:00:42.033664 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bed553dd66925984bb46fa18725c60fb60faa345b8674741c6d76303f40ca127"} err="failed to get container status \"bed553dd66925984bb46fa18725c60fb60faa345b8674741c6d76303f40ca127\": rpc error: code = NotFound desc = could not find container \"bed553dd66925984bb46fa18725c60fb60faa345b8674741c6d76303f40ca127\": container with ID starting with bed553dd66925984bb46fa18725c60fb60faa345b8674741c6d76303f40ca127 not found: ID does not exist" Dec 10 11:00:42 crc kubenswrapper[4682]: I1210 11:00:42.037446 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9cfxs\" (UniqueName: \"kubernetes.io/projected/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-kube-api-access-9cfxs\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:42 crc kubenswrapper[4682]: I1210 11:00:42.037492 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:42 crc kubenswrapper[4682]: I1210 11:00:42.037505 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:00:42 crc kubenswrapper[4682]: I1210 11:00:42.260936 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9bcc2"] Dec 10 11:00:42 crc kubenswrapper[4682]: I1210 11:00:42.266676 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9bcc2"] Dec 10 11:00:42 crc kubenswrapper[4682]: I1210 11:00:42.389330 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" path="/var/lib/kubelet/pods/73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f/volumes" Dec 10 11:00:42 crc kubenswrapper[4682]: I1210 11:00:42.938635 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zb5sb" event={"ID":"8ff1d21d-ef0f-421b-bb86-264a367afea9","Type":"ContainerStarted","Data":"4c8d2d6aaed37f91a23dbcc369b0ee7557884f3d5a1a7d6dc371ff06efefcc89"} Dec 10 11:00:42 crc kubenswrapper[4682]: I1210 11:00:42.957626 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zb5sb" podStartSLOduration=2.540165467 podStartE2EDuration="7.957600346s" podCreationTimestamp="2025-12-10 11:00:35 +0000 UTC" firstStartedPulling="2025-12-10 11:00:36.503229566 +0000 UTC m=+916.823440316" lastFinishedPulling="2025-12-10 11:00:41.920664425 +0000 UTC m=+922.240875195" observedRunningTime="2025-12-10 11:00:42.951488363 +0000 UTC m=+923.271699133" watchObservedRunningTime="2025-12-10 11:00:42.957600346 +0000 UTC m=+923.277811106" Dec 10 11:00:46 crc kubenswrapper[4682]: I1210 11:00:46.289118 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-bk4zp" Dec 10 11:00:46 crc kubenswrapper[4682]: I1210 11:00:46.664305 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:46 crc kubenswrapper[4682]: I1210 11:00:46.664372 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:46 crc kubenswrapper[4682]: I1210 11:00:46.669540 
4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:46 crc kubenswrapper[4682]: I1210 11:00:46.966025 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6d7b884cc6-549jv" Dec 10 11:00:47 crc kubenswrapper[4682]: I1210 11:00:47.026588 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-ftd94"] Dec 10 11:00:56 crc kubenswrapper[4682]: I1210 11:00:56.855054 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-vvtwc" Dec 10 11:01:06 crc kubenswrapper[4682]: I1210 11:01:06.478556 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:01:06 crc kubenswrapper[4682]: I1210 11:01:06.479058 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.641113 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q"] Dec 10 11:01:09 crc kubenswrapper[4682]: E1210 11:01:09.642033 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" containerName="extract-content" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.642050 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" containerName="extract-content" Dec 10 11:01:09 crc kubenswrapper[4682]: E1210 11:01:09.642068 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" containerName="registry-server" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.642076 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" containerName="registry-server" Dec 10 11:01:09 crc kubenswrapper[4682]: E1210 11:01:09.642102 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" containerName="extract-utilities" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.642109 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" containerName="extract-utilities" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.642243 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="73bde56c-b6cc-4acc-bfe4-e9a4e6f9dc5f" containerName="registry-server" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.643242 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.645181 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.661107 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q"] Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.807550 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q\" (UID: \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.807756 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpg9w\" (UniqueName: \"kubernetes.io/projected/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-kube-api-access-zpg9w\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q\" (UID: \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.807797 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q\" (UID: \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.908657 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q\" (UID: \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.908741 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpg9w\" (UniqueName: \"kubernetes.io/projected/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-kube-api-access-zpg9w\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q\" (UID: \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.908767 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q\" (UID: \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.909169 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q\" (UID: \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.909179 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q\" (UID: \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.932454 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpg9w\" (UniqueName: \"kubernetes.io/projected/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-kube-api-access-zpg9w\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q\" (UID: \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" Dec 10 11:01:09 crc kubenswrapper[4682]: I1210 11:01:09.958375 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" Dec 10 11:01:10 crc kubenswrapper[4682]: I1210 11:01:10.181783 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q"] Dec 10 11:01:11 crc kubenswrapper[4682]: I1210 11:01:11.141237 4682 generic.go:334] "Generic (PLEG): container finished" podID="8d258b61-c222-4a6a-9ca6-e73e7d1919b7" containerID="ae1c479561adbd8104736a5a835d8b7789df38413ad6799d7eceeb71b5d64119" exitCode=0 Dec 10 11:01:11 crc kubenswrapper[4682]: I1210 11:01:11.141578 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" event={"ID":"8d258b61-c222-4a6a-9ca6-e73e7d1919b7","Type":"ContainerDied","Data":"ae1c479561adbd8104736a5a835d8b7789df38413ad6799d7eceeb71b5d64119"} Dec 10 11:01:11 crc kubenswrapper[4682]: I1210 11:01:11.141611 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" event={"ID":"8d258b61-c222-4a6a-9ca6-e73e7d1919b7","Type":"ContainerStarted","Data":"06e4a820a1d77164e361bc2b7aad69ab74539cecb1343c86e16977912e517fcb"} Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.070323 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-ftd94" podUID="660474bf-d4be-49dc-b993-5cd3161cb575" containerName="console" containerID="cri-o://a924712cee651a927bafee02277b86713cd92c01a9c84c4eb50df3127e4743b0" gracePeriod=15 Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.435625 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-ftd94_660474bf-d4be-49dc-b993-5cd3161cb575/console/0.log" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.435705 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-ftd94" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.451609 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/660474bf-d4be-49dc-b993-5cd3161cb575-console-oauth-config\") pod \"660474bf-d4be-49dc-b993-5cd3161cb575\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.451702 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/660474bf-d4be-49dc-b993-5cd3161cb575-console-serving-cert\") pod \"660474bf-d4be-49dc-b993-5cd3161cb575\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.451733 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-console-config\") pod \"660474bf-d4be-49dc-b993-5cd3161cb575\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.451779 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tp4qm\" (UniqueName: \"kubernetes.io/projected/660474bf-d4be-49dc-b993-5cd3161cb575-kube-api-access-tp4qm\") pod \"660474bf-d4be-49dc-b993-5cd3161cb575\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.451828 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-service-ca\") pod \"660474bf-d4be-49dc-b993-5cd3161cb575\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.451868 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-oauth-serving-cert\") pod \"660474bf-d4be-49dc-b993-5cd3161cb575\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.451893 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-trusted-ca-bundle\") pod \"660474bf-d4be-49dc-b993-5cd3161cb575\" (UID: \"660474bf-d4be-49dc-b993-5cd3161cb575\") " Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.453612 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-service-ca" (OuterVolumeSpecName: "service-ca") pod "660474bf-d4be-49dc-b993-5cd3161cb575" (UID: "660474bf-d4be-49dc-b993-5cd3161cb575"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.453826 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "660474bf-d4be-49dc-b993-5cd3161cb575" (UID: "660474bf-d4be-49dc-b993-5cd3161cb575"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.454275 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "660474bf-d4be-49dc-b993-5cd3161cb575" (UID: "660474bf-d4be-49dc-b993-5cd3161cb575"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.454882 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-console-config" (OuterVolumeSpecName: "console-config") pod "660474bf-d4be-49dc-b993-5cd3161cb575" (UID: "660474bf-d4be-49dc-b993-5cd3161cb575"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.460057 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/660474bf-d4be-49dc-b993-5cd3161cb575-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "660474bf-d4be-49dc-b993-5cd3161cb575" (UID: "660474bf-d4be-49dc-b993-5cd3161cb575"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.465876 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/660474bf-d4be-49dc-b993-5cd3161cb575-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "660474bf-d4be-49dc-b993-5cd3161cb575" (UID: "660474bf-d4be-49dc-b993-5cd3161cb575"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.475971 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/660474bf-d4be-49dc-b993-5cd3161cb575-kube-api-access-tp4qm" (OuterVolumeSpecName: "kube-api-access-tp4qm") pod "660474bf-d4be-49dc-b993-5cd3161cb575" (UID: "660474bf-d4be-49dc-b993-5cd3161cb575"). InnerVolumeSpecName "kube-api-access-tp4qm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.553027 4682 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/660474bf-d4be-49dc-b993-5cd3161cb575-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.553075 4682 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-console-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.553267 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tp4qm\" (UniqueName: \"kubernetes.io/projected/660474bf-d4be-49dc-b993-5cd3161cb575-kube-api-access-tp4qm\") on node \"crc\" DevicePath \"\"" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.553284 4682 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.553296 4682 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.553308 4682 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/660474bf-d4be-49dc-b993-5cd3161cb575-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:01:12 crc kubenswrapper[4682]: I1210 11:01:12.553319 4682 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/660474bf-d4be-49dc-b993-5cd3161cb575-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:01:13 crc kubenswrapper[4682]: I1210 11:01:13.154019 4682 generic.go:334] "Generic (PLEG): container finished" podID="8d258b61-c222-4a6a-9ca6-e73e7d1919b7" containerID="071e7711e51dff7bb0ae061284661f8c691075e82a66e2ff37f88f52d0003641" exitCode=0 Dec 10 11:01:13 crc kubenswrapper[4682]: I1210 11:01:13.154071 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" event={"ID":"8d258b61-c222-4a6a-9ca6-e73e7d1919b7","Type":"ContainerDied","Data":"071e7711e51dff7bb0ae061284661f8c691075e82a66e2ff37f88f52d0003641"} Dec 10 11:01:13 crc kubenswrapper[4682]: I1210 11:01:13.156288 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-ftd94_660474bf-d4be-49dc-b993-5cd3161cb575/console/0.log" Dec 10 11:01:13 crc kubenswrapper[4682]: I1210 11:01:13.156326 4682 generic.go:334] "Generic (PLEG): container finished" podID="660474bf-d4be-49dc-b993-5cd3161cb575" containerID="a924712cee651a927bafee02277b86713cd92c01a9c84c4eb50df3127e4743b0" exitCode=2 Dec 10 11:01:13 crc kubenswrapper[4682]: I1210 11:01:13.156349 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ftd94" event={"ID":"660474bf-d4be-49dc-b993-5cd3161cb575","Type":"ContainerDied","Data":"a924712cee651a927bafee02277b86713cd92c01a9c84c4eb50df3127e4743b0"} Dec 10 11:01:13 crc kubenswrapper[4682]: I1210 11:01:13.156373 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ftd94" 
event={"ID":"660474bf-d4be-49dc-b993-5cd3161cb575","Type":"ContainerDied","Data":"4d8ce25b297b29e76cdfb03dcc0ed04cc37c5384a257765571ba901a1738fe9c"} Dec 10 11:01:13 crc kubenswrapper[4682]: I1210 11:01:13.156376 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-ftd94" Dec 10 11:01:13 crc kubenswrapper[4682]: I1210 11:01:13.156388 4682 scope.go:117] "RemoveContainer" containerID="a924712cee651a927bafee02277b86713cd92c01a9c84c4eb50df3127e4743b0" Dec 10 11:01:13 crc kubenswrapper[4682]: I1210 11:01:13.187703 4682 scope.go:117] "RemoveContainer" containerID="a924712cee651a927bafee02277b86713cd92c01a9c84c4eb50df3127e4743b0" Dec 10 11:01:13 crc kubenswrapper[4682]: E1210 11:01:13.188110 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a924712cee651a927bafee02277b86713cd92c01a9c84c4eb50df3127e4743b0\": container with ID starting with a924712cee651a927bafee02277b86713cd92c01a9c84c4eb50df3127e4743b0 not found: ID does not exist" containerID="a924712cee651a927bafee02277b86713cd92c01a9c84c4eb50df3127e4743b0" Dec 10 11:01:13 crc kubenswrapper[4682]: I1210 11:01:13.188147 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a924712cee651a927bafee02277b86713cd92c01a9c84c4eb50df3127e4743b0"} err="failed to get container status \"a924712cee651a927bafee02277b86713cd92c01a9c84c4eb50df3127e4743b0\": rpc error: code = NotFound desc = could not find container \"a924712cee651a927bafee02277b86713cd92c01a9c84c4eb50df3127e4743b0\": container with ID starting with a924712cee651a927bafee02277b86713cd92c01a9c84c4eb50df3127e4743b0 not found: ID does not exist" Dec 10 11:01:13 crc kubenswrapper[4682]: I1210 11:01:13.195593 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-ftd94"] Dec 10 11:01:13 crc kubenswrapper[4682]: I1210 11:01:13.198140 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-ftd94"] Dec 10 11:01:14 crc kubenswrapper[4682]: I1210 11:01:14.171960 4682 generic.go:334] "Generic (PLEG): container finished" podID="8d258b61-c222-4a6a-9ca6-e73e7d1919b7" containerID="2371a1f6e11cfee437ea52940904143c866dabf0df7b633a00c166383be9a373" exitCode=0 Dec 10 11:01:14 crc kubenswrapper[4682]: I1210 11:01:14.173248 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" event={"ID":"8d258b61-c222-4a6a-9ca6-e73e7d1919b7","Type":"ContainerDied","Data":"2371a1f6e11cfee437ea52940904143c866dabf0df7b633a00c166383be9a373"} Dec 10 11:01:14 crc kubenswrapper[4682]: I1210 11:01:14.388922 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="660474bf-d4be-49dc-b993-5cd3161cb575" path="/var/lib/kubelet/pods/660474bf-d4be-49dc-b993-5cd3161cb575/volumes" Dec 10 11:01:15 crc kubenswrapper[4682]: I1210 11:01:15.418160 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" Dec 10 11:01:15 crc kubenswrapper[4682]: I1210 11:01:15.590945 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zpg9w\" (UniqueName: \"kubernetes.io/projected/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-kube-api-access-zpg9w\") pod \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\" (UID: \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\") " Dec 10 11:01:15 crc kubenswrapper[4682]: I1210 11:01:15.591101 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-util\") pod \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\" (UID: \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\") " Dec 10 11:01:15 crc kubenswrapper[4682]: I1210 11:01:15.591133 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-bundle\") pod \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\" (UID: \"8d258b61-c222-4a6a-9ca6-e73e7d1919b7\") " Dec 10 11:01:15 crc kubenswrapper[4682]: I1210 11:01:15.592046 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-bundle" (OuterVolumeSpecName: "bundle") pod "8d258b61-c222-4a6a-9ca6-e73e7d1919b7" (UID: "8d258b61-c222-4a6a-9ca6-e73e7d1919b7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:01:15 crc kubenswrapper[4682]: I1210 11:01:15.596739 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-kube-api-access-zpg9w" (OuterVolumeSpecName: "kube-api-access-zpg9w") pod "8d258b61-c222-4a6a-9ca6-e73e7d1919b7" (UID: "8d258b61-c222-4a6a-9ca6-e73e7d1919b7"). InnerVolumeSpecName "kube-api-access-zpg9w". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:01:15 crc kubenswrapper[4682]: I1210 11:01:15.604306 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-util" (OuterVolumeSpecName: "util") pod "8d258b61-c222-4a6a-9ca6-e73e7d1919b7" (UID: "8d258b61-c222-4a6a-9ca6-e73e7d1919b7"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:01:15 crc kubenswrapper[4682]: I1210 11:01:15.692805 4682 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-util\") on node \"crc\" DevicePath \"\"" Dec 10 11:01:15 crc kubenswrapper[4682]: I1210 11:01:15.692857 4682 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:01:15 crc kubenswrapper[4682]: I1210 11:01:15.692881 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zpg9w\" (UniqueName: \"kubernetes.io/projected/8d258b61-c222-4a6a-9ca6-e73e7d1919b7-kube-api-access-zpg9w\") on node \"crc\" DevicePath \"\"" Dec 10 11:01:16 crc kubenswrapper[4682]: I1210 11:01:16.187823 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" event={"ID":"8d258b61-c222-4a6a-9ca6-e73e7d1919b7","Type":"ContainerDied","Data":"06e4a820a1d77164e361bc2b7aad69ab74539cecb1343c86e16977912e517fcb"} Dec 10 11:01:16 crc kubenswrapper[4682]: I1210 11:01:16.188189 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="06e4a820a1d77164e361bc2b7aad69ab74539cecb1343c86e16977912e517fcb" Dec 10 11:01:16 crc kubenswrapper[4682]: I1210 11:01:16.187896 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.417727 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw"] Dec 10 11:01:25 crc kubenswrapper[4682]: E1210 11:01:25.419694 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d258b61-c222-4a6a-9ca6-e73e7d1919b7" containerName="extract" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.419791 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d258b61-c222-4a6a-9ca6-e73e7d1919b7" containerName="extract" Dec 10 11:01:25 crc kubenswrapper[4682]: E1210 11:01:25.419867 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="660474bf-d4be-49dc-b993-5cd3161cb575" containerName="console" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.419937 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="660474bf-d4be-49dc-b993-5cd3161cb575" containerName="console" Dec 10 11:01:25 crc kubenswrapper[4682]: E1210 11:01:25.420024 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d258b61-c222-4a6a-9ca6-e73e7d1919b7" containerName="util" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.420095 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d258b61-c222-4a6a-9ca6-e73e7d1919b7" containerName="util" Dec 10 11:01:25 crc kubenswrapper[4682]: E1210 11:01:25.420175 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d258b61-c222-4a6a-9ca6-e73e7d1919b7" containerName="pull" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.420247 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d258b61-c222-4a6a-9ca6-e73e7d1919b7" containerName="pull" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.420450 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d258b61-c222-4a6a-9ca6-e73e7d1919b7" containerName="extract" Dec 
10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.420568 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="660474bf-d4be-49dc-b993-5cd3161cb575" containerName="console" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.421139 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.424139 4682 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.425301 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.425370 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.425543 4682 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-slqjq" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.425637 4682 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.450495 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw"] Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.616914 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0d34c412-0fb1-4dd3-9d93-66d805babdb3-apiservice-cert\") pod \"metallb-operator-controller-manager-59c7c5b449-wb9kw\" (UID: \"0d34c412-0fb1-4dd3-9d93-66d805babdb3\") " pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.617024 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds6j4\" (UniqueName: \"kubernetes.io/projected/0d34c412-0fb1-4dd3-9d93-66d805babdb3-kube-api-access-ds6j4\") pod \"metallb-operator-controller-manager-59c7c5b449-wb9kw\" (UID: \"0d34c412-0fb1-4dd3-9d93-66d805babdb3\") " pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.617050 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0d34c412-0fb1-4dd3-9d93-66d805babdb3-webhook-cert\") pod \"metallb-operator-controller-manager-59c7c5b449-wb9kw\" (UID: \"0d34c412-0fb1-4dd3-9d93-66d805babdb3\") " pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.718602 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds6j4\" (UniqueName: \"kubernetes.io/projected/0d34c412-0fb1-4dd3-9d93-66d805babdb3-kube-api-access-ds6j4\") pod \"metallb-operator-controller-manager-59c7c5b449-wb9kw\" (UID: \"0d34c412-0fb1-4dd3-9d93-66d805babdb3\") " pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.718654 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/0d34c412-0fb1-4dd3-9d93-66d805babdb3-webhook-cert\") pod \"metallb-operator-controller-manager-59c7c5b449-wb9kw\" (UID: \"0d34c412-0fb1-4dd3-9d93-66d805babdb3\") " pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.718691 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0d34c412-0fb1-4dd3-9d93-66d805babdb3-apiservice-cert\") pod \"metallb-operator-controller-manager-59c7c5b449-wb9kw\" (UID: \"0d34c412-0fb1-4dd3-9d93-66d805babdb3\") " pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.723866 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0d34c412-0fb1-4dd3-9d93-66d805babdb3-apiservice-cert\") pod \"metallb-operator-controller-manager-59c7c5b449-wb9kw\" (UID: \"0d34c412-0fb1-4dd3-9d93-66d805babdb3\") " pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.723966 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0d34c412-0fb1-4dd3-9d93-66d805babdb3-webhook-cert\") pod \"metallb-operator-controller-manager-59c7c5b449-wb9kw\" (UID: \"0d34c412-0fb1-4dd3-9d93-66d805babdb3\") " pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.745060 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds6j4\" (UniqueName: \"kubernetes.io/projected/0d34c412-0fb1-4dd3-9d93-66d805babdb3-kube-api-access-ds6j4\") pod \"metallb-operator-controller-manager-59c7c5b449-wb9kw\" (UID: \"0d34c412-0fb1-4dd3-9d93-66d805babdb3\") " pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.771622 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm"] Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.772492 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.775610 4682 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.775973 4682 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.776138 4682 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-szvvv" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.799021 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm"] Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.821894 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4c20b9d3-1c34-4d9a-8917-8933d9c376ce-webhook-cert\") pod \"metallb-operator-webhook-server-6f885c4f9b-4fdwm\" (UID: \"4c20b9d3-1c34-4d9a-8917-8933d9c376ce\") " pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.821941 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4c20b9d3-1c34-4d9a-8917-8933d9c376ce-apiservice-cert\") pod \"metallb-operator-webhook-server-6f885c4f9b-4fdwm\" (UID: \"4c20b9d3-1c34-4d9a-8917-8933d9c376ce\") " pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.821975 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzvx6\" (UniqueName: \"kubernetes.io/projected/4c20b9d3-1c34-4d9a-8917-8933d9c376ce-kube-api-access-dzvx6\") pod \"metallb-operator-webhook-server-6f885c4f9b-4fdwm\" (UID: \"4c20b9d3-1c34-4d9a-8917-8933d9c376ce\") " pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.923227 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzvx6\" (UniqueName: \"kubernetes.io/projected/4c20b9d3-1c34-4d9a-8917-8933d9c376ce-kube-api-access-dzvx6\") pod \"metallb-operator-webhook-server-6f885c4f9b-4fdwm\" (UID: \"4c20b9d3-1c34-4d9a-8917-8933d9c376ce\") " pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.923373 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4c20b9d3-1c34-4d9a-8917-8933d9c376ce-webhook-cert\") pod \"metallb-operator-webhook-server-6f885c4f9b-4fdwm\" (UID: \"4c20b9d3-1c34-4d9a-8917-8933d9c376ce\") " pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.923403 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4c20b9d3-1c34-4d9a-8917-8933d9c376ce-apiservice-cert\") pod \"metallb-operator-webhook-server-6f885c4f9b-4fdwm\" (UID: \"4c20b9d3-1c34-4d9a-8917-8933d9c376ce\") " pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 
11:01:25.926832 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4c20b9d3-1c34-4d9a-8917-8933d9c376ce-apiservice-cert\") pod \"metallb-operator-webhook-server-6f885c4f9b-4fdwm\" (UID: \"4c20b9d3-1c34-4d9a-8917-8933d9c376ce\") " pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.927297 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4c20b9d3-1c34-4d9a-8917-8933d9c376ce-webhook-cert\") pod \"metallb-operator-webhook-server-6f885c4f9b-4fdwm\" (UID: \"4c20b9d3-1c34-4d9a-8917-8933d9c376ce\") " pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" Dec 10 11:01:25 crc kubenswrapper[4682]: I1210 11:01:25.948199 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzvx6\" (UniqueName: \"kubernetes.io/projected/4c20b9d3-1c34-4d9a-8917-8933d9c376ce-kube-api-access-dzvx6\") pod \"metallb-operator-webhook-server-6f885c4f9b-4fdwm\" (UID: \"4c20b9d3-1c34-4d9a-8917-8933d9c376ce\") " pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" Dec 10 11:01:26 crc kubenswrapper[4682]: I1210 11:01:26.036912 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" Dec 10 11:01:26 crc kubenswrapper[4682]: I1210 11:01:26.091072 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" Dec 10 11:01:26 crc kubenswrapper[4682]: I1210 11:01:26.288015 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw"] Dec 10 11:01:26 crc kubenswrapper[4682]: W1210 11:01:26.295207 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d34c412_0fb1_4dd3_9d93_66d805babdb3.slice/crio-e5c095f5ce8e20fedf6bcfe99610c8384421b5b08ff604f2e8f58e18efe4fa3c WatchSource:0}: Error finding container e5c095f5ce8e20fedf6bcfe99610c8384421b5b08ff604f2e8f58e18efe4fa3c: Status 404 returned error can't find the container with id e5c095f5ce8e20fedf6bcfe99610c8384421b5b08ff604f2e8f58e18efe4fa3c Dec 10 11:01:26 crc kubenswrapper[4682]: I1210 11:01:26.579805 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm"] Dec 10 11:01:27 crc kubenswrapper[4682]: I1210 11:01:27.250842 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" event={"ID":"4c20b9d3-1c34-4d9a-8917-8933d9c376ce","Type":"ContainerStarted","Data":"8fb3633f425345d9f2145871b2e49d06bcb9ec330e45facb0f16dd0cbb75d8f8"} Dec 10 11:01:27 crc kubenswrapper[4682]: I1210 11:01:27.252161 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" event={"ID":"0d34c412-0fb1-4dd3-9d93-66d805babdb3","Type":"ContainerStarted","Data":"e5c095f5ce8e20fedf6bcfe99610c8384421b5b08ff604f2e8f58e18efe4fa3c"} Dec 10 11:01:30 crc kubenswrapper[4682]: I1210 11:01:30.274015 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" 
event={"ID":"0d34c412-0fb1-4dd3-9d93-66d805babdb3","Type":"ContainerStarted","Data":"5dc5840849698d104674e302dee1b877af26a646b068b1a3456cc7cc373282d0"} Dec 10 11:01:30 crc kubenswrapper[4682]: I1210 11:01:30.274597 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" Dec 10 11:01:30 crc kubenswrapper[4682]: I1210 11:01:30.321295 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" podStartSLOduration=2.447926267 podStartE2EDuration="5.32127711s" podCreationTimestamp="2025-12-10 11:01:25 +0000 UTC" firstStartedPulling="2025-12-10 11:01:26.298020501 +0000 UTC m=+966.618231241" lastFinishedPulling="2025-12-10 11:01:29.171371334 +0000 UTC m=+969.491582084" observedRunningTime="2025-12-10 11:01:30.317200531 +0000 UTC m=+970.637411281" watchObservedRunningTime="2025-12-10 11:01:30.32127711 +0000 UTC m=+970.641487860" Dec 10 11:01:31 crc kubenswrapper[4682]: I1210 11:01:31.283308 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" event={"ID":"4c20b9d3-1c34-4d9a-8917-8933d9c376ce","Type":"ContainerStarted","Data":"76d2ac915751be08e4772b560de25ab4e46efab277853c01d00f44abacf59f77"} Dec 10 11:01:31 crc kubenswrapper[4682]: I1210 11:01:31.303363 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" podStartSLOduration=2.155593064 podStartE2EDuration="6.30333876s" podCreationTimestamp="2025-12-10 11:01:25 +0000 UTC" firstStartedPulling="2025-12-10 11:01:26.587318767 +0000 UTC m=+966.907529517" lastFinishedPulling="2025-12-10 11:01:30.735064463 +0000 UTC m=+971.055275213" observedRunningTime="2025-12-10 11:01:31.301588604 +0000 UTC m=+971.621799444" watchObservedRunningTime="2025-12-10 11:01:31.30333876 +0000 UTC m=+971.623549510" Dec 10 11:01:32 crc kubenswrapper[4682]: I1210 11:01:32.288928 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" Dec 10 11:01:36 crc kubenswrapper[4682]: I1210 11:01:36.479021 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:01:36 crc kubenswrapper[4682]: I1210 11:01:36.479355 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:01:46 crc kubenswrapper[4682]: I1210 11:01:46.095977 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-6f885c4f9b-4fdwm" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.039866 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-59c7c5b449-wb9kw" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.478368 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.478793 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.478854 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.479665 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cb1f236ceb4d4541ff9535181be092107ce5f587a0c363e01762746593060db5"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.479799 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://cb1f236ceb4d4541ff9535181be092107ce5f587a0c363e01762746593060db5" gracePeriod=600 Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.823877 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b"] Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.825145 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.827702 4682 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-k4rj2" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.828749 4682 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.834513 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b"] Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.855192 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-l959x"] Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.859511 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.861080 4682 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.861507 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.905591 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-lwpvn"] Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.906747 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-lwpvn" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.908429 4682 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-gs85q" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.908707 4682 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.908709 4682 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.911510 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.917040 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-vxx5m"] Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.917936 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-vxx5m" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.920772 4682 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.929943 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-vxx5m"] Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.958923 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/d57c92a8-faaf-46ae-969f-db2ceefc22f0-metrics\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.959068 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqjsr\" (UniqueName: \"kubernetes.io/projected/d57c92a8-faaf-46ae-969f-db2ceefc22f0-kube-api-access-tqjsr\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.959161 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9e2611bd-3314-4e57-9167-e2fbfa6fecf2-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-m264b\" (UID: \"9e2611bd-3314-4e57-9167-e2fbfa6fecf2\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.959241 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/d57c92a8-faaf-46ae-969f-db2ceefc22f0-reloader\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.959293 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/d57c92a8-faaf-46ae-969f-db2ceefc22f0-frr-startup\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.959312 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: 
\"kubernetes.io/empty-dir/d57c92a8-faaf-46ae-969f-db2ceefc22f0-frr-conf\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.959346 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pclz7\" (UniqueName: \"kubernetes.io/projected/9e2611bd-3314-4e57-9167-e2fbfa6fecf2-kube-api-access-pclz7\") pod \"frr-k8s-webhook-server-7fcb986d4-m264b\" (UID: \"9e2611bd-3314-4e57-9167-e2fbfa6fecf2\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.959373 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d57c92a8-faaf-46ae-969f-db2ceefc22f0-metrics-certs\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:06 crc kubenswrapper[4682]: I1210 11:02:06.959537 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/d57c92a8-faaf-46ae-969f-db2ceefc22f0-frr-sockets\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.060972 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/511918ac-6534-424c-8dcc-6af79a689e3b-metrics-certs\") pod \"controller-f8648f98b-vxx5m\" (UID: \"511918ac-6534-424c-8dcc-6af79a689e3b\") " pod="metallb-system/controller-f8648f98b-vxx5m" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061041 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/d57c92a8-faaf-46ae-969f-db2ceefc22f0-metrics\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061071 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/85685af9-4c58-4bee-bf6b-abe9fb2626f9-metallb-excludel2\") pod \"speaker-lwpvn\" (UID: \"85685af9-4c58-4bee-bf6b-abe9fb2626f9\") " pod="metallb-system/speaker-lwpvn" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061106 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r487r\" (UniqueName: \"kubernetes.io/projected/85685af9-4c58-4bee-bf6b-abe9fb2626f9-kube-api-access-r487r\") pod \"speaker-lwpvn\" (UID: \"85685af9-4c58-4bee-bf6b-abe9fb2626f9\") " pod="metallb-system/speaker-lwpvn" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061139 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/511918ac-6534-424c-8dcc-6af79a689e3b-cert\") pod \"controller-f8648f98b-vxx5m\" (UID: \"511918ac-6534-424c-8dcc-6af79a689e3b\") " pod="metallb-system/controller-f8648f98b-vxx5m" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061163 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqjsr\" (UniqueName: 
\"kubernetes.io/projected/d57c92a8-faaf-46ae-969f-db2ceefc22f0-kube-api-access-tqjsr\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061191 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/85685af9-4c58-4bee-bf6b-abe9fb2626f9-memberlist\") pod \"speaker-lwpvn\" (UID: \"85685af9-4c58-4bee-bf6b-abe9fb2626f9\") " pod="metallb-system/speaker-lwpvn" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061221 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9e2611bd-3314-4e57-9167-e2fbfa6fecf2-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-m264b\" (UID: \"9e2611bd-3314-4e57-9167-e2fbfa6fecf2\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061281 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54cxc\" (UniqueName: \"kubernetes.io/projected/511918ac-6534-424c-8dcc-6af79a689e3b-kube-api-access-54cxc\") pod \"controller-f8648f98b-vxx5m\" (UID: \"511918ac-6534-424c-8dcc-6af79a689e3b\") " pod="metallb-system/controller-f8648f98b-vxx5m" Dec 10 11:02:07 crc kubenswrapper[4682]: E1210 11:02:07.061319 4682 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Dec 10 11:02:07 crc kubenswrapper[4682]: E1210 11:02:07.061373 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9e2611bd-3314-4e57-9167-e2fbfa6fecf2-cert podName:9e2611bd-3314-4e57-9167-e2fbfa6fecf2 nodeName:}" failed. No retries permitted until 2025-12-10 11:02:07.561353241 +0000 UTC m=+1007.881563991 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/9e2611bd-3314-4e57-9167-e2fbfa6fecf2-cert") pod "frr-k8s-webhook-server-7fcb986d4-m264b" (UID: "9e2611bd-3314-4e57-9167-e2fbfa6fecf2") : secret "frr-k8s-webhook-server-cert" not found Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061401 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/d57c92a8-faaf-46ae-969f-db2ceefc22f0-reloader\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061462 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/85685af9-4c58-4bee-bf6b-abe9fb2626f9-metrics-certs\") pod \"speaker-lwpvn\" (UID: \"85685af9-4c58-4bee-bf6b-abe9fb2626f9\") " pod="metallb-system/speaker-lwpvn" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061503 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/d57c92a8-faaf-46ae-969f-db2ceefc22f0-frr-startup\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061526 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/d57c92a8-faaf-46ae-969f-db2ceefc22f0-frr-conf\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061551 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pclz7\" (UniqueName: \"kubernetes.io/projected/9e2611bd-3314-4e57-9167-e2fbfa6fecf2-kube-api-access-pclz7\") pod \"frr-k8s-webhook-server-7fcb986d4-m264b\" (UID: \"9e2611bd-3314-4e57-9167-e2fbfa6fecf2\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061553 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/d57c92a8-faaf-46ae-969f-db2ceefc22f0-metrics\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: E1210 11:02:07.061656 4682 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Dec 10 11:02:07 crc kubenswrapper[4682]: E1210 11:02:07.061701 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d57c92a8-faaf-46ae-969f-db2ceefc22f0-metrics-certs podName:d57c92a8-faaf-46ae-969f-db2ceefc22f0 nodeName:}" failed. No retries permitted until 2025-12-10 11:02:07.561685832 +0000 UTC m=+1007.881896582 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/d57c92a8-faaf-46ae-969f-db2ceefc22f0-metrics-certs") pod "frr-k8s-l959x" (UID: "d57c92a8-faaf-46ae-969f-db2ceefc22f0") : secret "frr-k8s-certs-secret" not found Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061565 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d57c92a8-faaf-46ae-969f-db2ceefc22f0-metrics-certs\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061766 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/d57c92a8-faaf-46ae-969f-db2ceefc22f0-frr-sockets\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.061899 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/d57c92a8-faaf-46ae-969f-db2ceefc22f0-reloader\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.062060 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/d57c92a8-faaf-46ae-969f-db2ceefc22f0-frr-sockets\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.062209 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/d57c92a8-faaf-46ae-969f-db2ceefc22f0-frr-conf\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.062651 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/d57c92a8-faaf-46ae-969f-db2ceefc22f0-frr-startup\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.092083 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pclz7\" (UniqueName: \"kubernetes.io/projected/9e2611bd-3314-4e57-9167-e2fbfa6fecf2-kube-api-access-pclz7\") pod \"frr-k8s-webhook-server-7fcb986d4-m264b\" (UID: \"9e2611bd-3314-4e57-9167-e2fbfa6fecf2\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.094713 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqjsr\" (UniqueName: \"kubernetes.io/projected/d57c92a8-faaf-46ae-969f-db2ceefc22f0-kube-api-access-tqjsr\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.163151 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/511918ac-6534-424c-8dcc-6af79a689e3b-metrics-certs\") pod \"controller-f8648f98b-vxx5m\" (UID: \"511918ac-6534-424c-8dcc-6af79a689e3b\") " pod="metallb-system/controller-f8648f98b-vxx5m" Dec 10 11:02:07 crc 
kubenswrapper[4682]: I1210 11:02:07.163211 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/85685af9-4c58-4bee-bf6b-abe9fb2626f9-metallb-excludel2\") pod \"speaker-lwpvn\" (UID: \"85685af9-4c58-4bee-bf6b-abe9fb2626f9\") " pod="metallb-system/speaker-lwpvn" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.163235 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r487r\" (UniqueName: \"kubernetes.io/projected/85685af9-4c58-4bee-bf6b-abe9fb2626f9-kube-api-access-r487r\") pod \"speaker-lwpvn\" (UID: \"85685af9-4c58-4bee-bf6b-abe9fb2626f9\") " pod="metallb-system/speaker-lwpvn" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.163266 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/511918ac-6534-424c-8dcc-6af79a689e3b-cert\") pod \"controller-f8648f98b-vxx5m\" (UID: \"511918ac-6534-424c-8dcc-6af79a689e3b\") " pod="metallb-system/controller-f8648f98b-vxx5m" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.163292 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/85685af9-4c58-4bee-bf6b-abe9fb2626f9-memberlist\") pod \"speaker-lwpvn\" (UID: \"85685af9-4c58-4bee-bf6b-abe9fb2626f9\") " pod="metallb-system/speaker-lwpvn" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.163352 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54cxc\" (UniqueName: \"kubernetes.io/projected/511918ac-6534-424c-8dcc-6af79a689e3b-kube-api-access-54cxc\") pod \"controller-f8648f98b-vxx5m\" (UID: \"511918ac-6534-424c-8dcc-6af79a689e3b\") " pod="metallb-system/controller-f8648f98b-vxx5m" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.163383 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/85685af9-4c58-4bee-bf6b-abe9fb2626f9-metrics-certs\") pod \"speaker-lwpvn\" (UID: \"85685af9-4c58-4bee-bf6b-abe9fb2626f9\") " pod="metallb-system/speaker-lwpvn" Dec 10 11:02:07 crc kubenswrapper[4682]: E1210 11:02:07.163672 4682 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Dec 10 11:02:07 crc kubenswrapper[4682]: E1210 11:02:07.163733 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85685af9-4c58-4bee-bf6b-abe9fb2626f9-memberlist podName:85685af9-4c58-4bee-bf6b-abe9fb2626f9 nodeName:}" failed. No retries permitted until 2025-12-10 11:02:07.663717776 +0000 UTC m=+1007.983928526 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/85685af9-4c58-4bee-bf6b-abe9fb2626f9-memberlist") pod "speaker-lwpvn" (UID: "85685af9-4c58-4bee-bf6b-abe9fb2626f9") : secret "metallb-memberlist" not found Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.164056 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/85685af9-4c58-4bee-bf6b-abe9fb2626f9-metallb-excludel2\") pod \"speaker-lwpvn\" (UID: \"85685af9-4c58-4bee-bf6b-abe9fb2626f9\") " pod="metallb-system/speaker-lwpvn" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.167091 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/85685af9-4c58-4bee-bf6b-abe9fb2626f9-metrics-certs\") pod \"speaker-lwpvn\" (UID: \"85685af9-4c58-4bee-bf6b-abe9fb2626f9\") " pod="metallb-system/speaker-lwpvn" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.168060 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/511918ac-6534-424c-8dcc-6af79a689e3b-metrics-certs\") pod \"controller-f8648f98b-vxx5m\" (UID: \"511918ac-6534-424c-8dcc-6af79a689e3b\") " pod="metallb-system/controller-f8648f98b-vxx5m" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.170922 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/511918ac-6534-424c-8dcc-6af79a689e3b-cert\") pod \"controller-f8648f98b-vxx5m\" (UID: \"511918ac-6534-424c-8dcc-6af79a689e3b\") " pod="metallb-system/controller-f8648f98b-vxx5m" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.178681 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r487r\" (UniqueName: \"kubernetes.io/projected/85685af9-4c58-4bee-bf6b-abe9fb2626f9-kube-api-access-r487r\") pod \"speaker-lwpvn\" (UID: \"85685af9-4c58-4bee-bf6b-abe9fb2626f9\") " pod="metallb-system/speaker-lwpvn" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.180729 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54cxc\" (UniqueName: \"kubernetes.io/projected/511918ac-6534-424c-8dcc-6af79a689e3b-kube-api-access-54cxc\") pod \"controller-f8648f98b-vxx5m\" (UID: \"511918ac-6534-424c-8dcc-6af79a689e3b\") " pod="metallb-system/controller-f8648f98b-vxx5m" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.232402 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-vxx5m" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.413306 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-vxx5m"] Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.501226 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-vxx5m" event={"ID":"511918ac-6534-424c-8dcc-6af79a689e3b","Type":"ContainerStarted","Data":"067428f084b99b432e0fb65493c0291c3e1ba9d63fa9651f5dee10cae6ff7123"} Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.503528 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="cb1f236ceb4d4541ff9535181be092107ce5f587a0c363e01762746593060db5" exitCode=0 Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.503550 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"cb1f236ceb4d4541ff9535181be092107ce5f587a0c363e01762746593060db5"} Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.503591 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"a87379aa7407b916521958c3640f1cf7fec14e9fe313d9dbea26901e472ba31c"} Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.503611 4682 scope.go:117] "RemoveContainer" containerID="c6bff78a240d5adae318d431b3e181644756793c403e51687d775ce4fb2cfb9a" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.569056 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9e2611bd-3314-4e57-9167-e2fbfa6fecf2-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-m264b\" (UID: \"9e2611bd-3314-4e57-9167-e2fbfa6fecf2\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.569154 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d57c92a8-faaf-46ae-969f-db2ceefc22f0-metrics-certs\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.574622 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9e2611bd-3314-4e57-9167-e2fbfa6fecf2-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-m264b\" (UID: \"9e2611bd-3314-4e57-9167-e2fbfa6fecf2\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.574971 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d57c92a8-faaf-46ae-969f-db2ceefc22f0-metrics-certs\") pod \"frr-k8s-l959x\" (UID: \"d57c92a8-faaf-46ae-969f-db2ceefc22f0\") " pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.670190 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/85685af9-4c58-4bee-bf6b-abe9fb2626f9-memberlist\") pod \"speaker-lwpvn\" (UID: \"85685af9-4c58-4bee-bf6b-abe9fb2626f9\") " pod="metallb-system/speaker-lwpvn" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 
11:02:07.673062 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/85685af9-4c58-4bee-bf6b-abe9fb2626f9-memberlist\") pod \"speaker-lwpvn\" (UID: \"85685af9-4c58-4bee-bf6b-abe9fb2626f9\") " pod="metallb-system/speaker-lwpvn" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.737827 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.774217 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:07 crc kubenswrapper[4682]: I1210 11:02:07.821996 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-lwpvn" Dec 10 11:02:07 crc kubenswrapper[4682]: W1210 11:02:07.859516 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85685af9_4c58_4bee_bf6b_abe9fb2626f9.slice/crio-6db8febbac93bdbef1385b9d9459447fa4943209008da155d77f9acc6ca156b7 WatchSource:0}: Error finding container 6db8febbac93bdbef1385b9d9459447fa4943209008da155d77f9acc6ca156b7: Status 404 returned error can't find the container with id 6db8febbac93bdbef1385b9d9459447fa4943209008da155d77f9acc6ca156b7 Dec 10 11:02:08 crc kubenswrapper[4682]: I1210 11:02:08.156782 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b"] Dec 10 11:02:08 crc kubenswrapper[4682]: W1210 11:02:08.166905 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9e2611bd_3314_4e57_9167_e2fbfa6fecf2.slice/crio-f20313b4c632b3ad421549e2df05bf56561bbff9eebbefd52a93e1618687aca5 WatchSource:0}: Error finding container f20313b4c632b3ad421549e2df05bf56561bbff9eebbefd52a93e1618687aca5: Status 404 returned error can't find the container with id f20313b4c632b3ad421549e2df05bf56561bbff9eebbefd52a93e1618687aca5 Dec 10 11:02:08 crc kubenswrapper[4682]: I1210 11:02:08.525960 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-lwpvn" event={"ID":"85685af9-4c58-4bee-bf6b-abe9fb2626f9","Type":"ContainerStarted","Data":"2bac70fb4f5efd7aa9337d74dfdabb12a1c650c800fdac946a1565a6aa6f2542"} Dec 10 11:02:08 crc kubenswrapper[4682]: I1210 11:02:08.526025 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-lwpvn" event={"ID":"85685af9-4c58-4bee-bf6b-abe9fb2626f9","Type":"ContainerStarted","Data":"da01fb0102543816d919089392bfec2e302f906408fe8308f172113803b17f43"} Dec 10 11:02:08 crc kubenswrapper[4682]: I1210 11:02:08.526043 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-lwpvn" event={"ID":"85685af9-4c58-4bee-bf6b-abe9fb2626f9","Type":"ContainerStarted","Data":"6db8febbac93bdbef1385b9d9459447fa4943209008da155d77f9acc6ca156b7"} Dec 10 11:02:08 crc kubenswrapper[4682]: I1210 11:02:08.529012 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-lwpvn" Dec 10 11:02:08 crc kubenswrapper[4682]: I1210 11:02:08.534824 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-vxx5m" event={"ID":"511918ac-6534-424c-8dcc-6af79a689e3b","Type":"ContainerStarted","Data":"3f490f422824535d502e99fa02eb4ff0f4360b0fa961301934dc4830b9df094f"} Dec 10 11:02:08 crc kubenswrapper[4682]: I1210 
11:02:08.534881 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-vxx5m" event={"ID":"511918ac-6534-424c-8dcc-6af79a689e3b","Type":"ContainerStarted","Data":"e2a01353c43c7acbf9b5aa0a50b9c074b31491edd00bdafa522003126329e93b"} Dec 10 11:02:08 crc kubenswrapper[4682]: I1210 11:02:08.534995 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-vxx5m" Dec 10 11:02:08 crc kubenswrapper[4682]: I1210 11:02:08.543685 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-l959x" event={"ID":"d57c92a8-faaf-46ae-969f-db2ceefc22f0","Type":"ContainerStarted","Data":"b4db3da73c9d42d983ef4743244fb8d2ebc9ee9822def4a2496b4c71bd79b97b"} Dec 10 11:02:08 crc kubenswrapper[4682]: I1210 11:02:08.555628 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b" event={"ID":"9e2611bd-3314-4e57-9167-e2fbfa6fecf2","Type":"ContainerStarted","Data":"f20313b4c632b3ad421549e2df05bf56561bbff9eebbefd52a93e1618687aca5"} Dec 10 11:02:08 crc kubenswrapper[4682]: I1210 11:02:08.612945 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-lwpvn" podStartSLOduration=2.612910253 podStartE2EDuration="2.612910253s" podCreationTimestamp="2025-12-10 11:02:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:02:08.580576977 +0000 UTC m=+1008.900787737" watchObservedRunningTime="2025-12-10 11:02:08.612910253 +0000 UTC m=+1008.933121003" Dec 10 11:02:08 crc kubenswrapper[4682]: I1210 11:02:08.618772 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-vxx5m" podStartSLOduration=2.618759787 podStartE2EDuration="2.618759787s" podCreationTimestamp="2025-12-10 11:02:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:02:08.605082658 +0000 UTC m=+1008.925293408" watchObservedRunningTime="2025-12-10 11:02:08.618759787 +0000 UTC m=+1008.938970537" Dec 10 11:02:15 crc kubenswrapper[4682]: I1210 11:02:15.617593 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b" event={"ID":"9e2611bd-3314-4e57-9167-e2fbfa6fecf2","Type":"ContainerStarted","Data":"c7f105aa89eda279c5a01acbd4922d23f88edcf8863fc35e9e676e5ea5d059c1"} Dec 10 11:02:15 crc kubenswrapper[4682]: I1210 11:02:15.619616 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-l959x" event={"ID":"d57c92a8-faaf-46ae-969f-db2ceefc22f0","Type":"ContainerDied","Data":"36c55aa4a71d491fee970ea3c3371da1635370cfcf598d61ad2cb52f38a9472f"} Dec 10 11:02:15 crc kubenswrapper[4682]: I1210 11:02:15.619522 4682 generic.go:334] "Generic (PLEG): container finished" podID="d57c92a8-faaf-46ae-969f-db2ceefc22f0" containerID="36c55aa4a71d491fee970ea3c3371da1635370cfcf598d61ad2cb52f38a9472f" exitCode=0 Dec 10 11:02:15 crc kubenswrapper[4682]: I1210 11:02:15.620917 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b" Dec 10 11:02:15 crc kubenswrapper[4682]: I1210 11:02:15.643337 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b" podStartSLOduration=3.351327972 
podStartE2EDuration="9.643317682s" podCreationTimestamp="2025-12-10 11:02:06 +0000 UTC" firstStartedPulling="2025-12-10 11:02:08.170404468 +0000 UTC m=+1008.490615218" lastFinishedPulling="2025-12-10 11:02:14.462394178 +0000 UTC m=+1014.782604928" observedRunningTime="2025-12-10 11:02:15.637395745 +0000 UTC m=+1015.957606505" watchObservedRunningTime="2025-12-10 11:02:15.643317682 +0000 UTC m=+1015.963528442" Dec 10 11:02:16 crc kubenswrapper[4682]: I1210 11:02:16.632261 4682 generic.go:334] "Generic (PLEG): container finished" podID="d57c92a8-faaf-46ae-969f-db2ceefc22f0" containerID="49daf467826cc11dc9aa7f04bef1b678ad29c62fde6ddbef680b87db5ddca5d3" exitCode=0 Dec 10 11:02:16 crc kubenswrapper[4682]: I1210 11:02:16.632379 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-l959x" event={"ID":"d57c92a8-faaf-46ae-969f-db2ceefc22f0","Type":"ContainerDied","Data":"49daf467826cc11dc9aa7f04bef1b678ad29c62fde6ddbef680b87db5ddca5d3"} Dec 10 11:02:17 crc kubenswrapper[4682]: I1210 11:02:17.237926 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-vxx5m" Dec 10 11:02:17 crc kubenswrapper[4682]: I1210 11:02:17.640308 4682 generic.go:334] "Generic (PLEG): container finished" podID="d57c92a8-faaf-46ae-969f-db2ceefc22f0" containerID="59ed4f55fd93dd3314f635990ae9106c0a33edf46ae8af44a8797c0f8e02f9cb" exitCode=0 Dec 10 11:02:17 crc kubenswrapper[4682]: I1210 11:02:17.640348 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-l959x" event={"ID":"d57c92a8-faaf-46ae-969f-db2ceefc22f0","Type":"ContainerDied","Data":"59ed4f55fd93dd3314f635990ae9106c0a33edf46ae8af44a8797c0f8e02f9cb"} Dec 10 11:02:18 crc kubenswrapper[4682]: I1210 11:02:18.657944 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-l959x" event={"ID":"d57c92a8-faaf-46ae-969f-db2ceefc22f0","Type":"ContainerStarted","Data":"665bd1c9bf421d8c09c608bce701d6159b96aedbf859a5c72e37d48e952a29ac"} Dec 10 11:02:18 crc kubenswrapper[4682]: I1210 11:02:18.658357 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:18 crc kubenswrapper[4682]: I1210 11:02:18.658372 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-l959x" event={"ID":"d57c92a8-faaf-46ae-969f-db2ceefc22f0","Type":"ContainerStarted","Data":"7f29802a08ba4279da1b6c2effeaadda023e0e8fff7458b1106584699a02b073"} Dec 10 11:02:18 crc kubenswrapper[4682]: I1210 11:02:18.658387 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-l959x" event={"ID":"d57c92a8-faaf-46ae-969f-db2ceefc22f0","Type":"ContainerStarted","Data":"ab759d32c04a371c07756d0e37a8b35f1aee091ae75e327595e0d508e4467588"} Dec 10 11:02:18 crc kubenswrapper[4682]: I1210 11:02:18.658400 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-l959x" event={"ID":"d57c92a8-faaf-46ae-969f-db2ceefc22f0","Type":"ContainerStarted","Data":"eb37a56ac2a60f62f31f05ee2ec11b2a70781c46bb213ce8b0b6f20bc4e153c2"} Dec 10 11:02:18 crc kubenswrapper[4682]: I1210 11:02:18.658411 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-l959x" event={"ID":"d57c92a8-faaf-46ae-969f-db2ceefc22f0","Type":"ContainerStarted","Data":"abeca05151495065b1ee72cfd74ef30ab4ebb49ad7fd03980e5a76c5b9f02fe9"} Dec 10 11:02:18 crc kubenswrapper[4682]: I1210 11:02:18.658420 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="metallb-system/frr-k8s-l959x" event={"ID":"d57c92a8-faaf-46ae-969f-db2ceefc22f0","Type":"ContainerStarted","Data":"0e84bf00021d55cb96d4ac60e108cfba90dbdef0582a5d43e2934567ab8cc68a"} Dec 10 11:02:22 crc kubenswrapper[4682]: I1210 11:02:22.775372 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:22 crc kubenswrapper[4682]: I1210 11:02:22.813755 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:22 crc kubenswrapper[4682]: I1210 11:02:22.838998 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-l959x" podStartSLOduration=10.295629205000001 podStartE2EDuration="16.838977599s" podCreationTimestamp="2025-12-10 11:02:06 +0000 UTC" firstStartedPulling="2025-12-10 11:02:07.940114656 +0000 UTC m=+1008.260325406" lastFinishedPulling="2025-12-10 11:02:14.48346301 +0000 UTC m=+1014.803673800" observedRunningTime="2025-12-10 11:02:18.687289628 +0000 UTC m=+1019.007500388" watchObservedRunningTime="2025-12-10 11:02:22.838977599 +0000 UTC m=+1023.159188359" Dec 10 11:02:27 crc kubenswrapper[4682]: I1210 11:02:27.741870 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m264b" Dec 10 11:02:27 crc kubenswrapper[4682]: I1210 11:02:27.778449 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-l959x" Dec 10 11:02:27 crc kubenswrapper[4682]: I1210 11:02:27.825872 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-lwpvn" Dec 10 11:02:30 crc kubenswrapper[4682]: I1210 11:02:30.995797 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-lqn88"] Dec 10 11:02:30 crc kubenswrapper[4682]: I1210 11:02:30.997117 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-lqn88" Dec 10 11:02:31 crc kubenswrapper[4682]: I1210 11:02:31.016584 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Dec 10 11:02:31 crc kubenswrapper[4682]: I1210 11:02:31.017742 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-tvv4p" Dec 10 11:02:31 crc kubenswrapper[4682]: I1210 11:02:31.022339 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Dec 10 11:02:31 crc kubenswrapper[4682]: I1210 11:02:31.029330 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-lqn88"] Dec 10 11:02:31 crc kubenswrapper[4682]: I1210 11:02:31.112485 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jl9bc\" (UniqueName: \"kubernetes.io/projected/0753e0fc-e38c-463c-a8d3-800d51598e59-kube-api-access-jl9bc\") pod \"openstack-operator-index-lqn88\" (UID: \"0753e0fc-e38c-463c-a8d3-800d51598e59\") " pod="openstack-operators/openstack-operator-index-lqn88" Dec 10 11:02:31 crc kubenswrapper[4682]: I1210 11:02:31.213463 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jl9bc\" (UniqueName: \"kubernetes.io/projected/0753e0fc-e38c-463c-a8d3-800d51598e59-kube-api-access-jl9bc\") pod \"openstack-operator-index-lqn88\" (UID: \"0753e0fc-e38c-463c-a8d3-800d51598e59\") " pod="openstack-operators/openstack-operator-index-lqn88" Dec 10 11:02:31 crc kubenswrapper[4682]: I1210 11:02:31.236003 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jl9bc\" (UniqueName: \"kubernetes.io/projected/0753e0fc-e38c-463c-a8d3-800d51598e59-kube-api-access-jl9bc\") pod \"openstack-operator-index-lqn88\" (UID: \"0753e0fc-e38c-463c-a8d3-800d51598e59\") " pod="openstack-operators/openstack-operator-index-lqn88" Dec 10 11:02:31 crc kubenswrapper[4682]: I1210 11:02:31.362035 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-lqn88" Dec 10 11:02:31 crc kubenswrapper[4682]: I1210 11:02:31.842381 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-lqn88"] Dec 10 11:02:31 crc kubenswrapper[4682]: W1210 11:02:31.849029 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0753e0fc_e38c_463c_a8d3_800d51598e59.slice/crio-babdaf8d7287278a5f22efcfae1b89a509c1daff3812968fc0458e6cdbb1bcea WatchSource:0}: Error finding container babdaf8d7287278a5f22efcfae1b89a509c1daff3812968fc0458e6cdbb1bcea: Status 404 returned error can't find the container with id babdaf8d7287278a5f22efcfae1b89a509c1daff3812968fc0458e6cdbb1bcea Dec 10 11:02:31 crc kubenswrapper[4682]: I1210 11:02:31.852002 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:02:32 crc kubenswrapper[4682]: I1210 11:02:32.765864 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lqn88" event={"ID":"0753e0fc-e38c-463c-a8d3-800d51598e59","Type":"ContainerStarted","Data":"babdaf8d7287278a5f22efcfae1b89a509c1daff3812968fc0458e6cdbb1bcea"} Dec 10 11:02:34 crc kubenswrapper[4682]: I1210 11:02:34.165582 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-lqn88"] Dec 10 11:02:34 crc kubenswrapper[4682]: I1210 11:02:34.789971 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lqn88" event={"ID":"0753e0fc-e38c-463c-a8d3-800d51598e59","Type":"ContainerStarted","Data":"b92c319a07c3a1434604bc15668a94b12aa774c02d37055545192a8897c0571e"} Dec 10 11:02:34 crc kubenswrapper[4682]: I1210 11:02:34.790297 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-fg9v7"] Dec 10 11:02:34 crc kubenswrapper[4682]: I1210 11:02:34.790122 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-lqn88" podUID="0753e0fc-e38c-463c-a8d3-800d51598e59" containerName="registry-server" containerID="cri-o://b92c319a07c3a1434604bc15668a94b12aa774c02d37055545192a8897c0571e" gracePeriod=2 Dec 10 11:02:34 crc kubenswrapper[4682]: I1210 11:02:34.791026 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-fg9v7" Dec 10 11:02:34 crc kubenswrapper[4682]: I1210 11:02:34.801197 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-fg9v7"] Dec 10 11:02:34 crc kubenswrapper[4682]: I1210 11:02:34.820510 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-lqn88" podStartSLOduration=2.43199725 podStartE2EDuration="4.820459303s" podCreationTimestamp="2025-12-10 11:02:30 +0000 UTC" firstStartedPulling="2025-12-10 11:02:31.851735009 +0000 UTC m=+1032.171945759" lastFinishedPulling="2025-12-10 11:02:34.240197062 +0000 UTC m=+1034.560407812" observedRunningTime="2025-12-10 11:02:34.817131669 +0000 UTC m=+1035.137342459" watchObservedRunningTime="2025-12-10 11:02:34.820459303 +0000 UTC m=+1035.140670083" Dec 10 11:02:34 crc kubenswrapper[4682]: I1210 11:02:34.967740 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xbp5\" (UniqueName: \"kubernetes.io/projected/a0ccaebf-eedd-4bb2-927c-6d59100df2b3-kube-api-access-8xbp5\") pod \"openstack-operator-index-fg9v7\" (UID: \"a0ccaebf-eedd-4bb2-927c-6d59100df2b3\") " pod="openstack-operators/openstack-operator-index-fg9v7" Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.069373 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xbp5\" (UniqueName: \"kubernetes.io/projected/a0ccaebf-eedd-4bb2-927c-6d59100df2b3-kube-api-access-8xbp5\") pod \"openstack-operator-index-fg9v7\" (UID: \"a0ccaebf-eedd-4bb2-927c-6d59100df2b3\") " pod="openstack-operators/openstack-operator-index-fg9v7" Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.100423 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xbp5\" (UniqueName: \"kubernetes.io/projected/a0ccaebf-eedd-4bb2-927c-6d59100df2b3-kube-api-access-8xbp5\") pod \"openstack-operator-index-fg9v7\" (UID: \"a0ccaebf-eedd-4bb2-927c-6d59100df2b3\") " pod="openstack-operators/openstack-operator-index-fg9v7" Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.124688 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-fg9v7" Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.213977 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-lqn88" Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.374018 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jl9bc\" (UniqueName: \"kubernetes.io/projected/0753e0fc-e38c-463c-a8d3-800d51598e59-kube-api-access-jl9bc\") pod \"0753e0fc-e38c-463c-a8d3-800d51598e59\" (UID: \"0753e0fc-e38c-463c-a8d3-800d51598e59\") " Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.394751 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0753e0fc-e38c-463c-a8d3-800d51598e59-kube-api-access-jl9bc" (OuterVolumeSpecName: "kube-api-access-jl9bc") pod "0753e0fc-e38c-463c-a8d3-800d51598e59" (UID: "0753e0fc-e38c-463c-a8d3-800d51598e59"). InnerVolumeSpecName "kube-api-access-jl9bc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.479266 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jl9bc\" (UniqueName: \"kubernetes.io/projected/0753e0fc-e38c-463c-a8d3-800d51598e59-kube-api-access-jl9bc\") on node \"crc\" DevicePath \"\"" Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.568354 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-fg9v7"] Dec 10 11:02:35 crc kubenswrapper[4682]: W1210 11:02:35.576448 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0ccaebf_eedd_4bb2_927c_6d59100df2b3.slice/crio-2983dccbdec89c2a2b5c555bf12424fdfd4a2a0c51b42a8702ca9ccc3bdfa070 WatchSource:0}: Error finding container 2983dccbdec89c2a2b5c555bf12424fdfd4a2a0c51b42a8702ca9ccc3bdfa070: Status 404 returned error can't find the container with id 2983dccbdec89c2a2b5c555bf12424fdfd4a2a0c51b42a8702ca9ccc3bdfa070 Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.797241 4682 generic.go:334] "Generic (PLEG): container finished" podID="0753e0fc-e38c-463c-a8d3-800d51598e59" containerID="b92c319a07c3a1434604bc15668a94b12aa774c02d37055545192a8897c0571e" exitCode=0 Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.797305 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lqn88" event={"ID":"0753e0fc-e38c-463c-a8d3-800d51598e59","Type":"ContainerDied","Data":"b92c319a07c3a1434604bc15668a94b12aa774c02d37055545192a8897c0571e"} Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.797565 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lqn88" event={"ID":"0753e0fc-e38c-463c-a8d3-800d51598e59","Type":"ContainerDied","Data":"babdaf8d7287278a5f22efcfae1b89a509c1daff3812968fc0458e6cdbb1bcea"} Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.797583 4682 scope.go:117] "RemoveContainer" containerID="b92c319a07c3a1434604bc15668a94b12aa774c02d37055545192a8897c0571e" Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.797320 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-lqn88" Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.800429 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-fg9v7" event={"ID":"a0ccaebf-eedd-4bb2-927c-6d59100df2b3","Type":"ContainerStarted","Data":"4776f0a7da987bd951b430c2f65d0070b26e85a8c8c32adf49c17dc00558337d"} Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.800547 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-fg9v7" event={"ID":"a0ccaebf-eedd-4bb2-927c-6d59100df2b3","Type":"ContainerStarted","Data":"2983dccbdec89c2a2b5c555bf12424fdfd4a2a0c51b42a8702ca9ccc3bdfa070"} Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.817198 4682 scope.go:117] "RemoveContainer" containerID="b92c319a07c3a1434604bc15668a94b12aa774c02d37055545192a8897c0571e" Dec 10 11:02:35 crc kubenswrapper[4682]: E1210 11:02:35.817615 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b92c319a07c3a1434604bc15668a94b12aa774c02d37055545192a8897c0571e\": container with ID starting with b92c319a07c3a1434604bc15668a94b12aa774c02d37055545192a8897c0571e not found: ID does not exist" containerID="b92c319a07c3a1434604bc15668a94b12aa774c02d37055545192a8897c0571e" Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.817649 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b92c319a07c3a1434604bc15668a94b12aa774c02d37055545192a8897c0571e"} err="failed to get container status \"b92c319a07c3a1434604bc15668a94b12aa774c02d37055545192a8897c0571e\": rpc error: code = NotFound desc = could not find container \"b92c319a07c3a1434604bc15668a94b12aa774c02d37055545192a8897c0571e\": container with ID starting with b92c319a07c3a1434604bc15668a94b12aa774c02d37055545192a8897c0571e not found: ID does not exist" Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.821238 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-fg9v7" podStartSLOduration=1.771525518 podStartE2EDuration="1.821225419s" podCreationTimestamp="2025-12-10 11:02:34 +0000 UTC" firstStartedPulling="2025-12-10 11:02:35.579755286 +0000 UTC m=+1035.899966046" lastFinishedPulling="2025-12-10 11:02:35.629455197 +0000 UTC m=+1035.949665947" observedRunningTime="2025-12-10 11:02:35.817142971 +0000 UTC m=+1036.137353751" watchObservedRunningTime="2025-12-10 11:02:35.821225419 +0000 UTC m=+1036.141436179" Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.833370 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-lqn88"] Dec 10 11:02:35 crc kubenswrapper[4682]: I1210 11:02:35.838579 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-lqn88"] Dec 10 11:02:36 crc kubenswrapper[4682]: I1210 11:02:36.391632 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0753e0fc-e38c-463c-a8d3-800d51598e59" path="/var/lib/kubelet/pods/0753e0fc-e38c-463c-a8d3-800d51598e59/volumes" Dec 10 11:02:45 crc kubenswrapper[4682]: I1210 11:02:45.125223 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-fg9v7" Dec 10 11:02:45 crc kubenswrapper[4682]: I1210 11:02:45.125941 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack-operators/openstack-operator-index-fg9v7" Dec 10 11:02:45 crc kubenswrapper[4682]: I1210 11:02:45.149666 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-fg9v7" Dec 10 11:02:45 crc kubenswrapper[4682]: I1210 11:02:45.892326 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-fg9v7" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.423115 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb"] Dec 10 11:02:48 crc kubenswrapper[4682]: E1210 11:02:48.423789 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0753e0fc-e38c-463c-a8d3-800d51598e59" containerName="registry-server" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.423807 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0753e0fc-e38c-463c-a8d3-800d51598e59" containerName="registry-server" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.423941 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0753e0fc-e38c-463c-a8d3-800d51598e59" containerName="registry-server" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.425102 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.427743 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-nwkmz" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.436790 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb"] Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.564131 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e903d396-7f4d-415c-8c7a-802cf7937946-bundle\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb\" (UID: \"e903d396-7f4d-415c-8c7a-802cf7937946\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.564226 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w65hz\" (UniqueName: \"kubernetes.io/projected/e903d396-7f4d-415c-8c7a-802cf7937946-kube-api-access-w65hz\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb\" (UID: \"e903d396-7f4d-415c-8c7a-802cf7937946\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.564266 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e903d396-7f4d-415c-8c7a-802cf7937946-util\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb\" (UID: \"e903d396-7f4d-415c-8c7a-802cf7937946\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.665779 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e903d396-7f4d-415c-8c7a-802cf7937946-bundle\") pod 
\"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb\" (UID: \"e903d396-7f4d-415c-8c7a-802cf7937946\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.665867 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w65hz\" (UniqueName: \"kubernetes.io/projected/e903d396-7f4d-415c-8c7a-802cf7937946-kube-api-access-w65hz\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb\" (UID: \"e903d396-7f4d-415c-8c7a-802cf7937946\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.665902 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e903d396-7f4d-415c-8c7a-802cf7937946-util\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb\" (UID: \"e903d396-7f4d-415c-8c7a-802cf7937946\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.666452 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e903d396-7f4d-415c-8c7a-802cf7937946-util\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb\" (UID: \"e903d396-7f4d-415c-8c7a-802cf7937946\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.666659 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e903d396-7f4d-415c-8c7a-802cf7937946-bundle\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb\" (UID: \"e903d396-7f4d-415c-8c7a-802cf7937946\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.700060 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w65hz\" (UniqueName: \"kubernetes.io/projected/e903d396-7f4d-415c-8c7a-802cf7937946-kube-api-access-w65hz\") pod \"0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb\" (UID: \"e903d396-7f4d-415c-8c7a-802cf7937946\") " pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" Dec 10 11:02:48 crc kubenswrapper[4682]: I1210 11:02:48.750348 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" Dec 10 11:02:49 crc kubenswrapper[4682]: I1210 11:02:49.166216 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb"] Dec 10 11:02:49 crc kubenswrapper[4682]: W1210 11:02:49.172659 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode903d396_7f4d_415c_8c7a_802cf7937946.slice/crio-16eaff5d21bf6ec06318016150036753f879b8e5933b805d1bd8261bc9c46764 WatchSource:0}: Error finding container 16eaff5d21bf6ec06318016150036753f879b8e5933b805d1bd8261bc9c46764: Status 404 returned error can't find the container with id 16eaff5d21bf6ec06318016150036753f879b8e5933b805d1bd8261bc9c46764 Dec 10 11:02:49 crc kubenswrapper[4682]: I1210 11:02:49.908841 4682 generic.go:334] "Generic (PLEG): container finished" podID="e903d396-7f4d-415c-8c7a-802cf7937946" containerID="6307e4d2d2089a026c028ed5ed830ca36271cc041affbcf571023bf5af9c98a2" exitCode=0 Dec 10 11:02:49 crc kubenswrapper[4682]: I1210 11:02:49.908930 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" event={"ID":"e903d396-7f4d-415c-8c7a-802cf7937946","Type":"ContainerDied","Data":"6307e4d2d2089a026c028ed5ed830ca36271cc041affbcf571023bf5af9c98a2"} Dec 10 11:02:49 crc kubenswrapper[4682]: I1210 11:02:49.909031 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" event={"ID":"e903d396-7f4d-415c-8c7a-802cf7937946","Type":"ContainerStarted","Data":"16eaff5d21bf6ec06318016150036753f879b8e5933b805d1bd8261bc9c46764"} Dec 10 11:02:50 crc kubenswrapper[4682]: I1210 11:02:50.921232 4682 generic.go:334] "Generic (PLEG): container finished" podID="e903d396-7f4d-415c-8c7a-802cf7937946" containerID="4d7f39e0b7dd8b176aaec1872ff39af5d9d0f4f98451decbb1ee91dc7e3da442" exitCode=0 Dec 10 11:02:50 crc kubenswrapper[4682]: I1210 11:02:50.921358 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" event={"ID":"e903d396-7f4d-415c-8c7a-802cf7937946","Type":"ContainerDied","Data":"4d7f39e0b7dd8b176aaec1872ff39af5d9d0f4f98451decbb1ee91dc7e3da442"} Dec 10 11:02:51 crc kubenswrapper[4682]: I1210 11:02:51.934497 4682 generic.go:334] "Generic (PLEG): container finished" podID="e903d396-7f4d-415c-8c7a-802cf7937946" containerID="a003305a10774419a72866d2111fa3469c8196d231b1d7ed24b14e168c9c6dea" exitCode=0 Dec 10 11:02:51 crc kubenswrapper[4682]: I1210 11:02:51.934841 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" event={"ID":"e903d396-7f4d-415c-8c7a-802cf7937946","Type":"ContainerDied","Data":"a003305a10774419a72866d2111fa3469c8196d231b1d7ed24b14e168c9c6dea"} Dec 10 11:02:53 crc kubenswrapper[4682]: I1210 11:02:53.218744 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" Dec 10 11:02:53 crc kubenswrapper[4682]: I1210 11:02:53.336130 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e903d396-7f4d-415c-8c7a-802cf7937946-util\") pod \"e903d396-7f4d-415c-8c7a-802cf7937946\" (UID: \"e903d396-7f4d-415c-8c7a-802cf7937946\") " Dec 10 11:02:53 crc kubenswrapper[4682]: I1210 11:02:53.336216 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e903d396-7f4d-415c-8c7a-802cf7937946-bundle\") pod \"e903d396-7f4d-415c-8c7a-802cf7937946\" (UID: \"e903d396-7f4d-415c-8c7a-802cf7937946\") " Dec 10 11:02:53 crc kubenswrapper[4682]: I1210 11:02:53.336291 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w65hz\" (UniqueName: \"kubernetes.io/projected/e903d396-7f4d-415c-8c7a-802cf7937946-kube-api-access-w65hz\") pod \"e903d396-7f4d-415c-8c7a-802cf7937946\" (UID: \"e903d396-7f4d-415c-8c7a-802cf7937946\") " Dec 10 11:02:53 crc kubenswrapper[4682]: I1210 11:02:53.336877 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e903d396-7f4d-415c-8c7a-802cf7937946-bundle" (OuterVolumeSpecName: "bundle") pod "e903d396-7f4d-415c-8c7a-802cf7937946" (UID: "e903d396-7f4d-415c-8c7a-802cf7937946"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:02:53 crc kubenswrapper[4682]: I1210 11:02:53.345266 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e903d396-7f4d-415c-8c7a-802cf7937946-kube-api-access-w65hz" (OuterVolumeSpecName: "kube-api-access-w65hz") pod "e903d396-7f4d-415c-8c7a-802cf7937946" (UID: "e903d396-7f4d-415c-8c7a-802cf7937946"). InnerVolumeSpecName "kube-api-access-w65hz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:02:53 crc kubenswrapper[4682]: I1210 11:02:53.356828 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e903d396-7f4d-415c-8c7a-802cf7937946-util" (OuterVolumeSpecName: "util") pod "e903d396-7f4d-415c-8c7a-802cf7937946" (UID: "e903d396-7f4d-415c-8c7a-802cf7937946"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:02:53 crc kubenswrapper[4682]: I1210 11:02:53.437632 4682 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e903d396-7f4d-415c-8c7a-802cf7937946-util\") on node \"crc\" DevicePath \"\"" Dec 10 11:02:53 crc kubenswrapper[4682]: I1210 11:02:53.437667 4682 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e903d396-7f4d-415c-8c7a-802cf7937946-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:02:53 crc kubenswrapper[4682]: I1210 11:02:53.437678 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w65hz\" (UniqueName: \"kubernetes.io/projected/e903d396-7f4d-415c-8c7a-802cf7937946-kube-api-access-w65hz\") on node \"crc\" DevicePath \"\"" Dec 10 11:02:53 crc kubenswrapper[4682]: I1210 11:02:53.951949 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" event={"ID":"e903d396-7f4d-415c-8c7a-802cf7937946","Type":"ContainerDied","Data":"16eaff5d21bf6ec06318016150036753f879b8e5933b805d1bd8261bc9c46764"} Dec 10 11:02:53 crc kubenswrapper[4682]: I1210 11:02:53.952008 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb" Dec 10 11:02:53 crc kubenswrapper[4682]: I1210 11:02:53.952021 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="16eaff5d21bf6ec06318016150036753f879b8e5933b805d1bd8261bc9c46764" Dec 10 11:03:03 crc kubenswrapper[4682]: I1210 11:03:03.758990 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-7lq6p"] Dec 10 11:03:03 crc kubenswrapper[4682]: E1210 11:03:03.759864 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e903d396-7f4d-415c-8c7a-802cf7937946" containerName="util" Dec 10 11:03:03 crc kubenswrapper[4682]: I1210 11:03:03.759881 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="e903d396-7f4d-415c-8c7a-802cf7937946" containerName="util" Dec 10 11:03:03 crc kubenswrapper[4682]: E1210 11:03:03.759896 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e903d396-7f4d-415c-8c7a-802cf7937946" containerName="pull" Dec 10 11:03:03 crc kubenswrapper[4682]: I1210 11:03:03.759903 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="e903d396-7f4d-415c-8c7a-802cf7937946" containerName="pull" Dec 10 11:03:03 crc kubenswrapper[4682]: E1210 11:03:03.759914 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e903d396-7f4d-415c-8c7a-802cf7937946" containerName="extract" Dec 10 11:03:03 crc kubenswrapper[4682]: I1210 11:03:03.759920 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="e903d396-7f4d-415c-8c7a-802cf7937946" containerName="extract" Dec 10 11:03:03 crc kubenswrapper[4682]: I1210 11:03:03.760075 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="e903d396-7f4d-415c-8c7a-802cf7937946" containerName="extract" Dec 10 11:03:03 crc kubenswrapper[4682]: I1210 11:03:03.760642 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-7lq6p" Dec 10 11:03:03 crc kubenswrapper[4682]: I1210 11:03:03.767079 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-t5br9" Dec 10 11:03:03 crc kubenswrapper[4682]: I1210 11:03:03.807590 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-7lq6p"] Dec 10 11:03:03 crc kubenswrapper[4682]: I1210 11:03:03.877462 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lr47q\" (UniqueName: \"kubernetes.io/projected/c7b3146b-cbe2-443c-b721-060df70df8ed-kube-api-access-lr47q\") pod \"openstack-operator-controller-operator-7b77d4dbbf-7lq6p\" (UID: \"c7b3146b-cbe2-443c-b721-060df70df8ed\") " pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-7lq6p" Dec 10 11:03:03 crc kubenswrapper[4682]: I1210 11:03:03.978544 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lr47q\" (UniqueName: \"kubernetes.io/projected/c7b3146b-cbe2-443c-b721-060df70df8ed-kube-api-access-lr47q\") pod \"openstack-operator-controller-operator-7b77d4dbbf-7lq6p\" (UID: \"c7b3146b-cbe2-443c-b721-060df70df8ed\") " pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-7lq6p" Dec 10 11:03:03 crc kubenswrapper[4682]: I1210 11:03:03.996192 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lr47q\" (UniqueName: \"kubernetes.io/projected/c7b3146b-cbe2-443c-b721-060df70df8ed-kube-api-access-lr47q\") pod \"openstack-operator-controller-operator-7b77d4dbbf-7lq6p\" (UID: \"c7b3146b-cbe2-443c-b721-060df70df8ed\") " pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-7lq6p" Dec 10 11:03:04 crc kubenswrapper[4682]: I1210 11:03:04.081758 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-7lq6p" Dec 10 11:03:04 crc kubenswrapper[4682]: I1210 11:03:04.626318 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-7lq6p"] Dec 10 11:03:04 crc kubenswrapper[4682]: W1210 11:03:04.630628 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7b3146b_cbe2_443c_b721_060df70df8ed.slice/crio-f0c714c12c70faa02e71edcfc0ff5db87171ddeb2cc90b1f1c4307e305db2ee3 WatchSource:0}: Error finding container f0c714c12c70faa02e71edcfc0ff5db87171ddeb2cc90b1f1c4307e305db2ee3: Status 404 returned error can't find the container with id f0c714c12c70faa02e71edcfc0ff5db87171ddeb2cc90b1f1c4307e305db2ee3 Dec 10 11:03:05 crc kubenswrapper[4682]: I1210 11:03:05.030303 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-7lq6p" event={"ID":"c7b3146b-cbe2-443c-b721-060df70df8ed","Type":"ContainerStarted","Data":"f0c714c12c70faa02e71edcfc0ff5db87171ddeb2cc90b1f1c4307e305db2ee3"} Dec 10 11:03:09 crc kubenswrapper[4682]: I1210 11:03:09.060612 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-7lq6p" event={"ID":"c7b3146b-cbe2-443c-b721-060df70df8ed","Type":"ContainerStarted","Data":"ccc8ca79904c4ebf6406a8f74de59d6aec3c5d44e9adb1a9dcbffadcd356dff3"} Dec 10 11:03:09 crc kubenswrapper[4682]: I1210 11:03:09.061143 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-7lq6p" Dec 10 11:03:09 crc kubenswrapper[4682]: I1210 11:03:09.093417 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-7lq6p" podStartSLOduration=2.396530412 podStartE2EDuration="6.093401793s" podCreationTimestamp="2025-12-10 11:03:03 +0000 UTC" firstStartedPulling="2025-12-10 11:03:04.632263801 +0000 UTC m=+1064.952474551" lastFinishedPulling="2025-12-10 11:03:08.329135182 +0000 UTC m=+1068.649345932" observedRunningTime="2025-12-10 11:03:09.091170563 +0000 UTC m=+1069.411381333" watchObservedRunningTime="2025-12-10 11:03:09.093401793 +0000 UTC m=+1069.413612543" Dec 10 11:03:14 crc kubenswrapper[4682]: I1210 11:03:14.085909 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-7b77d4dbbf-7lq6p" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.136304 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-6lth6"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.137825 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-6lth6" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.155096 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-5z8hb" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.168422 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-6lth6"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.173291 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwsfm\" (UniqueName: \"kubernetes.io/projected/4985e1e4-e9fa-406a-a744-45d9e9dc8135-kube-api-access-vwsfm\") pod \"barbican-operator-controller-manager-7d9dfd778-6lth6\" (UID: \"4985e1e4-e9fa-406a-a744-45d9e9dc8135\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-6lth6" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.174952 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.179913 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.182156 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-blkb9" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.204012 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.205386 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.213457 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-98qlr" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.225784 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.260152 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-b28jt"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.261351 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-b28jt" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.264653 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-26vbl"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.265614 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-26vbl" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.265658 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-7j27z" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.272460 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-7xrkx" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.275127 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwsfm\" (UniqueName: \"kubernetes.io/projected/4985e1e4-e9fa-406a-a744-45d9e9dc8135-kube-api-access-vwsfm\") pod \"barbican-operator-controller-manager-7d9dfd778-6lth6\" (UID: \"4985e1e4-e9fa-406a-a744-45d9e9dc8135\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-6lth6" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.300530 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-26vbl"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.334758 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.340891 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-b28jt"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.348221 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwsfm\" (UniqueName: \"kubernetes.io/projected/4985e1e4-e9fa-406a-a744-45d9e9dc8135-kube-api-access-vwsfm\") pod \"barbican-operator-controller-manager-7d9dfd778-6lth6\" (UID: \"4985e1e4-e9fa-406a-a744-45d9e9dc8135\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-6lth6" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.359531 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.360567 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.364818 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-hzhcz" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.376084 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9ltw\" (UniqueName: \"kubernetes.io/projected/35d24c54-906b-406e-b03e-9fe2008fbb10-kube-api-access-r9ltw\") pod \"glance-operator-controller-manager-5697bb5779-b28jt\" (UID: \"35d24c54-906b-406e-b03e-9fe2008fbb10\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-b28jt" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.376146 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8f2kh\" (UniqueName: \"kubernetes.io/projected/41b81f6b-1509-4330-b9b7-8692c065e8d0-kube-api-access-8f2kh\") pod \"cinder-operator-controller-manager-6c677c69b-j9rxl\" (UID: \"41b81f6b-1509-4330-b9b7-8692c065e8d0\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.376165 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfvqr\" (UniqueName: \"kubernetes.io/projected/ecddf494-21c9-4fe4-9431-a61d9bc6ba0d-kube-api-access-hfvqr\") pod \"designate-operator-controller-manager-697fb699cf-ldjzz\" (UID: \"ecddf494-21c9-4fe4-9431-a61d9bc6ba0d\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.376180 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hppcm\" (UniqueName: \"kubernetes.io/projected/539df8ed-9553-4ce0-be01-36055d2ab100-kube-api-access-hppcm\") pod \"heat-operator-controller-manager-5f64f6f8bb-26vbl\" (UID: \"539df8ed-9553-4ce0-be01-36055d2ab100\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-26vbl" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.382480 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.391318 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.400997 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-qlrls" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.402068 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.420943 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.421010 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.421981 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.422074 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.429412 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-rxpkj" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.454089 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-6lth6" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.459511 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.463536 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.464556 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.473857 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-4js4f" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.479109 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9ltw\" (UniqueName: \"kubernetes.io/projected/35d24c54-906b-406e-b03e-9fe2008fbb10-kube-api-access-r9ltw\") pod \"glance-operator-controller-manager-5697bb5779-b28jt\" (UID: \"35d24c54-906b-406e-b03e-9fe2008fbb10\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-b28jt" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.479165 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8f2kh\" (UniqueName: \"kubernetes.io/projected/41b81f6b-1509-4330-b9b7-8692c065e8d0-kube-api-access-8f2kh\") pod \"cinder-operator-controller-manager-6c677c69b-j9rxl\" (UID: \"41b81f6b-1509-4330-b9b7-8692c065e8d0\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.479192 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfvqr\" (UniqueName: \"kubernetes.io/projected/ecddf494-21c9-4fe4-9431-a61d9bc6ba0d-kube-api-access-hfvqr\") pod \"designate-operator-controller-manager-697fb699cf-ldjzz\" (UID: \"ecddf494-21c9-4fe4-9431-a61d9bc6ba0d\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.479216 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hppcm\" (UniqueName: \"kubernetes.io/projected/539df8ed-9553-4ce0-be01-36055d2ab100-kube-api-access-hppcm\") pod \"heat-operator-controller-manager-5f64f6f8bb-26vbl\" (UID: \"539df8ed-9553-4ce0-be01-36055d2ab100\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-26vbl" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.479252 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nlcw\" (UniqueName: \"kubernetes.io/projected/754f75d2-ce2a-4983-a82a-c62a2ffb2b04-kube-api-access-4nlcw\") pod \"horizon-operator-controller-manager-68c6d99b8f-s7vjn\" (UID: \"754f75d2-ce2a-4983-a82a-c62a2ffb2b04\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.479284 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npwn9\" (UniqueName: \"kubernetes.io/projected/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-kube-api-access-npwn9\") pod \"infra-operator-controller-manager-78d48bff9d-lffwd\" (UID: \"84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.479313 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert\") pod \"infra-operator-controller-manager-78d48bff9d-lffwd\" (UID: \"84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 
11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.524855 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.526197 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.541097 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.541202 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-ksb8q" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.557174 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfvqr\" (UniqueName: \"kubernetes.io/projected/ecddf494-21c9-4fe4-9431-a61d9bc6ba0d-kube-api-access-hfvqr\") pod \"designate-operator-controller-manager-697fb699cf-ldjzz\" (UID: \"ecddf494-21c9-4fe4-9431-a61d9bc6ba0d\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.557920 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.566409 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8f2kh\" (UniqueName: \"kubernetes.io/projected/41b81f6b-1509-4330-b9b7-8692c065e8d0-kube-api-access-8f2kh\") pod \"cinder-operator-controller-manager-6c677c69b-j9rxl\" (UID: \"41b81f6b-1509-4330-b9b7-8692c065e8d0\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.572383 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9ltw\" (UniqueName: \"kubernetes.io/projected/35d24c54-906b-406e-b03e-9fe2008fbb10-kube-api-access-r9ltw\") pod \"glance-operator-controller-manager-5697bb5779-b28jt\" (UID: \"35d24c54-906b-406e-b03e-9fe2008fbb10\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-b28jt" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.582276 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nlcw\" (UniqueName: \"kubernetes.io/projected/754f75d2-ce2a-4983-a82a-c62a2ffb2b04-kube-api-access-4nlcw\") pod \"horizon-operator-controller-manager-68c6d99b8f-s7vjn\" (UID: \"754f75d2-ce2a-4983-a82a-c62a2ffb2b04\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.582335 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdwvx\" (UniqueName: \"kubernetes.io/projected/00c2e072-614d-483b-a9da-86f271a88095-kube-api-access-kdwvx\") pod \"ironic-operator-controller-manager-967d97867-cxbm8\" (UID: \"00c2e072-614d-483b-a9da-86f271a88095\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.582359 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npwn9\" (UniqueName: 
\"kubernetes.io/projected/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-kube-api-access-npwn9\") pod \"infra-operator-controller-manager-78d48bff9d-lffwd\" (UID: \"84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.582393 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert\") pod \"infra-operator-controller-manager-78d48bff9d-lffwd\" (UID: \"84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.582458 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5l7s\" (UniqueName: \"kubernetes.io/projected/79c97552-a229-4d38-ac96-79c2ef3303bf-kube-api-access-r5l7s\") pod \"keystone-operator-controller-manager-7765d96ddf-mkfhq\" (UID: \"79c97552-a229-4d38-ac96-79c2ef3303bf\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq" Dec 10 11:03:34 crc kubenswrapper[4682]: E1210 11:03:34.583715 4682 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 10 11:03:34 crc kubenswrapper[4682]: E1210 11:03:34.583772 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert podName:84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f nodeName:}" failed. No retries permitted until 2025-12-10 11:03:35.08375182 +0000 UTC m=+1095.403962570 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert") pod "infra-operator-controller-manager-78d48bff9d-lffwd" (UID: "84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f") : secret "infra-operator-webhook-server-cert" not found Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.598223 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-b28jt" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.598946 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.601824 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hppcm\" (UniqueName: \"kubernetes.io/projected/539df8ed-9553-4ce0-be01-36055d2ab100-kube-api-access-hppcm\") pod \"heat-operator-controller-manager-5f64f6f8bb-26vbl\" (UID: \"539df8ed-9553-4ce0-be01-36055d2ab100\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-26vbl" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.683812 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-26vbl" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.700436 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdwvx\" (UniqueName: \"kubernetes.io/projected/00c2e072-614d-483b-a9da-86f271a88095-kube-api-access-kdwvx\") pod \"ironic-operator-controller-manager-967d97867-cxbm8\" (UID: \"00c2e072-614d-483b-a9da-86f271a88095\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.700529 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnnd8\" (UniqueName: \"kubernetes.io/projected/c69a769f-919b-4cf6-9957-a4cdc2a8f8d7-kube-api-access-wnnd8\") pod \"manila-operator-controller-manager-5b5fd79c9c-tv4q2\" (UID: \"c69a769f-919b-4cf6-9957-a4cdc2a8f8d7\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.700564 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5l7s\" (UniqueName: \"kubernetes.io/projected/79c97552-a229-4d38-ac96-79c2ef3303bf-kube-api-access-r5l7s\") pod \"keystone-operator-controller-manager-7765d96ddf-mkfhq\" (UID: \"79c97552-a229-4d38-ac96-79c2ef3303bf\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.704049 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nlcw\" (UniqueName: \"kubernetes.io/projected/754f75d2-ce2a-4983-a82a-c62a2ffb2b04-kube-api-access-4nlcw\") pod \"horizon-operator-controller-manager-68c6d99b8f-s7vjn\" (UID: \"754f75d2-ce2a-4983-a82a-c62a2ffb2b04\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.716430 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npwn9\" (UniqueName: \"kubernetes.io/projected/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-kube-api-access-npwn9\") pod \"infra-operator-controller-manager-78d48bff9d-lffwd\" (UID: \"84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.741495 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mnl7f"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.742873 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mnl7f" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.743977 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.762112 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-spnqs" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.778115 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mnl7f"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.778250 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.778229 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.788690 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-8l7xt" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.810191 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-9rc5v"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.810613 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cq8t\" (UniqueName: \"kubernetes.io/projected/c3051489-ad76-489d-b143-a913219881da-kube-api-access-7cq8t\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-mnl7f\" (UID: \"c3051489-ad76-489d-b143-a913219881da\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mnl7f" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.810684 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnnd8\" (UniqueName: \"kubernetes.io/projected/c69a769f-919b-4cf6-9957-a4cdc2a8f8d7-kube-api-access-wnnd8\") pod \"manila-operator-controller-manager-5b5fd79c9c-tv4q2\" (UID: \"c69a769f-919b-4cf6-9957-a4cdc2a8f8d7\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.810715 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9jln\" (UniqueName: \"kubernetes.io/projected/78994f55-53cc-46ce-a67f-8bcde14796f4-kube-api-access-l9jln\") pod \"mariadb-operator-controller-manager-79c8c4686c-5jw7k\" (UID: \"78994f55-53cc-46ce-a67f-8bcde14796f4\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.811677 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5l7s\" (UniqueName: \"kubernetes.io/projected/79c97552-a229-4d38-ac96-79c2ef3303bf-kube-api-access-r5l7s\") pod \"keystone-operator-controller-manager-7765d96ddf-mkfhq\" (UID: \"79c97552-a229-4d38-ac96-79c2ef3303bf\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.812494 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-9rc5v" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.815458 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.816739 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdwvx\" (UniqueName: \"kubernetes.io/projected/00c2e072-614d-483b-a9da-86f271a88095-kube-api-access-kdwvx\") pod \"ironic-operator-controller-manager-967d97867-cxbm8\" (UID: \"00c2e072-614d-483b-a9da-86f271a88095\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.828594 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-9rc5v"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.835526 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-lddjn"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.836644 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-lddjn" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.842822 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-ch2p6" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.848313 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-kbst4" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.852073 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-lddjn"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.872579 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.874009 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.878727 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-wcrdj" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.893791 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnnd8\" (UniqueName: \"kubernetes.io/projected/c69a769f-919b-4cf6-9957-a4cdc2a8f8d7-kube-api-access-wnnd8\") pod \"manila-operator-controller-manager-5b5fd79c9c-tv4q2\" (UID: \"c69a769f-919b-4cf6-9957-a4cdc2a8f8d7\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.905537 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.906810 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.911969 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.912208 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-995kn" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.913573 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cq8t\" (UniqueName: \"kubernetes.io/projected/c3051489-ad76-489d-b143-a913219881da-kube-api-access-7cq8t\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-mnl7f\" (UID: \"c3051489-ad76-489d-b143-a913219881da\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mnl7f" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.913683 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9jln\" (UniqueName: \"kubernetes.io/projected/78994f55-53cc-46ce-a67f-8bcde14796f4-kube-api-access-l9jln\") pod \"mariadb-operator-controller-manager-79c8c4686c-5jw7k\" (UID: \"78994f55-53cc-46ce-a67f-8bcde14796f4\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.931656 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.939605 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.967905 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-lttng"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.977396 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-lttng" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.979311 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-4pbhv" Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.979531 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-lttng"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.996120 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr"] Dec 10 11:03:34 crc kubenswrapper[4682]: I1210 11:03:34.997260 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.000914 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-j8hg5" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.010978 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w"] Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.011892 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.013888 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.014901 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2q7m\" (UniqueName: \"kubernetes.io/projected/a4641319-ef96-4ffb-ac2e-a35154984ba8-kube-api-access-l2q7m\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdgsbh\" (UID: \"a4641319-ef96-4ffb-ac2e-a35154984ba8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.014936 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnd7g\" (UniqueName: \"kubernetes.io/projected/091fd04a-949b-4f31-8c04-80402b84ac36-kube-api-access-wnd7g\") pod \"ovn-operator-controller-manager-b6456fdb6-5k89m\" (UID: \"091fd04a-949b-4f31-8c04-80402b84ac36\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.014961 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgqhm\" (UniqueName: \"kubernetes.io/projected/3224e18d-9f3d-4c9c-abb9-eed4fa24989c-kube-api-access-vgqhm\") pod \"octavia-operator-controller-manager-998648c74-lddjn\" (UID: \"3224e18d-9f3d-4c9c-abb9-eed4fa24989c\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-lddjn" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.014985 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdgsbh\" (UID: \"a4641319-ef96-4ffb-ac2e-a35154984ba8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.015030 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42l9p\" (UniqueName: \"kubernetes.io/projected/0f6f4969-902f-44e4-a29e-fcb24ce0d7e4-kube-api-access-42l9p\") pod \"nova-operator-controller-manager-697bc559fc-9rc5v\" (UID: \"0f6f4969-902f-44e4-a29e-fcb24ce0d7e4\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-9rc5v" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.019129 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr"] Dec 10 11:03:35 crc 
kubenswrapper[4682]: I1210 11:03:35.019680 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-h2dvq" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.027001 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w"] Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.041954 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.046686 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.047759 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cq8t\" (UniqueName: \"kubernetes.io/projected/c3051489-ad76-489d-b143-a913219881da-kube-api-access-7cq8t\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-mnl7f\" (UID: \"c3051489-ad76-489d-b143-a913219881da\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mnl7f" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.071052 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9jln\" (UniqueName: \"kubernetes.io/projected/78994f55-53cc-46ce-a67f-8bcde14796f4-kube-api-access-l9jln\") pod \"mariadb-operator-controller-manager-79c8c4686c-5jw7k\" (UID: \"78994f55-53cc-46ce-a67f-8bcde14796f4\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.079269 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mnl7f" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.089762 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.110193 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds"] Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.112136 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.119415 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22ss4\" (UniqueName: \"kubernetes.io/projected/028aa123-014b-4836-a8d8-e0acafea568f-kube-api-access-22ss4\") pod \"swift-operator-controller-manager-9d58d64bc-v9vrr\" (UID: \"028aa123-014b-4836-a8d8-e0acafea568f\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.119477 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glcmf\" (UniqueName: \"kubernetes.io/projected/8f9fd772-c4a8-48d1-8294-c0572ad44506-kube-api-access-glcmf\") pod \"test-operator-controller-manager-5854674fcc-fz6ds\" (UID: \"8f9fd772-c4a8-48d1-8294-c0572ad44506\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.119501 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8znd7\" (UniqueName: \"kubernetes.io/projected/0d277084-bc96-4bcb-a090-76ef7e2f385e-kube-api-access-8znd7\") pod \"placement-operator-controller-manager-78f8948974-lttng\" (UID: \"0d277084-bc96-4bcb-a090-76ef7e2f385e\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-lttng" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.119525 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42l9p\" (UniqueName: \"kubernetes.io/projected/0f6f4969-902f-44e4-a29e-fcb24ce0d7e4-kube-api-access-42l9p\") pod \"nova-operator-controller-manager-697bc559fc-9rc5v\" (UID: \"0f6f4969-902f-44e4-a29e-fcb24ce0d7e4\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-9rc5v" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.119562 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert\") pod \"infra-operator-controller-manager-78d48bff9d-lffwd\" (UID: \"84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.119608 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2q7m\" (UniqueName: \"kubernetes.io/projected/a4641319-ef96-4ffb-ac2e-a35154984ba8-kube-api-access-l2q7m\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdgsbh\" (UID: \"a4641319-ef96-4ffb-ac2e-a35154984ba8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.119626 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnd7g\" (UniqueName: \"kubernetes.io/projected/091fd04a-949b-4f31-8c04-80402b84ac36-kube-api-access-wnd7g\") pod \"ovn-operator-controller-manager-b6456fdb6-5k89m\" (UID: \"091fd04a-949b-4f31-8c04-80402b84ac36\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.119648 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-md57n\" (UniqueName: 
\"kubernetes.io/projected/c2238ca5-7b77-471b-a743-75e076a61ce1-kube-api-access-md57n\") pod \"telemetry-operator-controller-manager-54d54d59bc-cjf8w\" (UID: \"c2238ca5-7b77-471b-a743-75e076a61ce1\") " pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.119668 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgqhm\" (UniqueName: \"kubernetes.io/projected/3224e18d-9f3d-4c9c-abb9-eed4fa24989c-kube-api-access-vgqhm\") pod \"octavia-operator-controller-manager-998648c74-lddjn\" (UID: \"3224e18d-9f3d-4c9c-abb9-eed4fa24989c\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-lddjn" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.119698 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdgsbh\" (UID: \"a4641319-ef96-4ffb-ac2e-a35154984ba8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:03:35 crc kubenswrapper[4682]: E1210 11:03:35.119835 4682 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:03:35 crc kubenswrapper[4682]: E1210 11:03:35.119879 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert podName:a4641319-ef96-4ffb-ac2e-a35154984ba8 nodeName:}" failed. No retries permitted until 2025-12-10 11:03:35.619863128 +0000 UTC m=+1095.940073878 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fdgsbh" (UID: "a4641319-ef96-4ffb-ac2e-a35154984ba8") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:03:35 crc kubenswrapper[4682]: E1210 11:03:35.120243 4682 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 10 11:03:35 crc kubenswrapper[4682]: E1210 11:03:35.120267 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert podName:84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f nodeName:}" failed. No retries permitted until 2025-12-10 11:03:36.12025932 +0000 UTC m=+1096.440470070 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert") pod "infra-operator-controller-manager-78d48bff9d-lffwd" (UID: "84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f") : secret "infra-operator-webhook-server-cert" not found Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.155109 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.168102 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-98clf" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.184081 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6"] Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.205221 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnd7g\" (UniqueName: \"kubernetes.io/projected/091fd04a-949b-4f31-8c04-80402b84ac36-kube-api-access-wnd7g\") pod \"ovn-operator-controller-manager-b6456fdb6-5k89m\" (UID: \"091fd04a-949b-4f31-8c04-80402b84ac36\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.210133 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42l9p\" (UniqueName: \"kubernetes.io/projected/0f6f4969-902f-44e4-a29e-fcb24ce0d7e4-kube-api-access-42l9p\") pod \"nova-operator-controller-manager-697bc559fc-9rc5v\" (UID: \"0f6f4969-902f-44e4-a29e-fcb24ce0d7e4\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-9rc5v" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.228012 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2q7m\" (UniqueName: \"kubernetes.io/projected/a4641319-ef96-4ffb-ac2e-a35154984ba8-kube-api-access-l2q7m\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdgsbh\" (UID: \"a4641319-ef96-4ffb-ac2e-a35154984ba8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.235450 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds"] Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.235600 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.240093 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22ss4\" (UniqueName: \"kubernetes.io/projected/028aa123-014b-4836-a8d8-e0acafea568f-kube-api-access-22ss4\") pod \"swift-operator-controller-manager-9d58d64bc-v9vrr\" (UID: \"028aa123-014b-4836-a8d8-e0acafea568f\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.240140 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glcmf\" (UniqueName: \"kubernetes.io/projected/8f9fd772-c4a8-48d1-8294-c0572ad44506-kube-api-access-glcmf\") pod \"test-operator-controller-manager-5854674fcc-fz6ds\" (UID: \"8f9fd772-c4a8-48d1-8294-c0572ad44506\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.240168 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8znd7\" (UniqueName: \"kubernetes.io/projected/0d277084-bc96-4bcb-a090-76ef7e2f385e-kube-api-access-8znd7\") pod \"placement-operator-controller-manager-78f8948974-lttng\" (UID: \"0d277084-bc96-4bcb-a090-76ef7e2f385e\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-lttng" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.240262 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-md57n\" (UniqueName: \"kubernetes.io/projected/c2238ca5-7b77-471b-a743-75e076a61ce1-kube-api-access-md57n\") pod \"telemetry-operator-controller-manager-54d54d59bc-cjf8w\" (UID: \"c2238ca5-7b77-471b-a743-75e076a61ce1\") " pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.256354 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-58np4" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.273753 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgqhm\" (UniqueName: \"kubernetes.io/projected/3224e18d-9f3d-4c9c-abb9-eed4fa24989c-kube-api-access-vgqhm\") pod \"octavia-operator-controller-manager-998648c74-lddjn\" (UID: \"3224e18d-9f3d-4c9c-abb9-eed4fa24989c\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-lddjn" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.274526 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6"] Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.326612 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r"] Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.328172 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.332177 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r"] Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.339830 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.340101 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.340230 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-kcmjg" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.366116 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9t5x5\" (UniqueName: \"kubernetes.io/projected/f76d2d54-5cd6-4e5c-b719-92117a1e6cb9-kube-api-access-9t5x5\") pod \"watcher-operator-controller-manager-667bd8d554-5x7z6\" (UID: \"f76d2d54-5cd6-4e5c-b719-92117a1e6cb9\") " pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.372267 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.396548 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-76vm7"] Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.397707 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-76vm7" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.403806 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-mw9vd" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.409317 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-76vm7"] Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.424674 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8znd7\" (UniqueName: \"kubernetes.io/projected/0d277084-bc96-4bcb-a090-76ef7e2f385e-kube-api-access-8znd7\") pod \"placement-operator-controller-manager-78f8948974-lttng\" (UID: \"0d277084-bc96-4bcb-a090-76ef7e2f385e\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-lttng" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.425641 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-md57n\" (UniqueName: \"kubernetes.io/projected/c2238ca5-7b77-471b-a743-75e076a61ce1-kube-api-access-md57n\") pod \"telemetry-operator-controller-manager-54d54d59bc-cjf8w\" (UID: \"c2238ca5-7b77-471b-a743-75e076a61ce1\") " pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.429028 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glcmf\" (UniqueName: \"kubernetes.io/projected/8f9fd772-c4a8-48d1-8294-c0572ad44506-kube-api-access-glcmf\") pod \"test-operator-controller-manager-5854674fcc-fz6ds\" (UID: \"8f9fd772-c4a8-48d1-8294-c0572ad44506\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.432600 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22ss4\" (UniqueName: \"kubernetes.io/projected/028aa123-014b-4836-a8d8-e0acafea568f-kube-api-access-22ss4\") pod \"swift-operator-controller-manager-9d58d64bc-v9vrr\" (UID: \"028aa123-014b-4836-a8d8-e0acafea568f\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.467726 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkjmx\" (UniqueName: \"kubernetes.io/projected/b92a5136-09a9-49c1-ad89-bf46bccb9d45-kube-api-access-rkjmx\") pod \"rabbitmq-cluster-operator-manager-668c99d594-76vm7\" (UID: \"b92a5136-09a9-49c1-ad89-bf46bccb9d45\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-76vm7" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.467789 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.467809 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs\") pod 
\"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.467829 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9t5x5\" (UniqueName: \"kubernetes.io/projected/f76d2d54-5cd6-4e5c-b719-92117a1e6cb9-kube-api-access-9t5x5\") pod \"watcher-operator-controller-manager-667bd8d554-5x7z6\" (UID: \"f76d2d54-5cd6-4e5c-b719-92117a1e6cb9\") " pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.467881 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tt4ks\" (UniqueName: \"kubernetes.io/projected/31129f99-dd83-4b51-a741-5629f1f825fb-kube-api-access-tt4ks\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.509527 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-9rc5v" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.534998 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9t5x5\" (UniqueName: \"kubernetes.io/projected/f76d2d54-5cd6-4e5c-b719-92117a1e6cb9-kube-api-access-9t5x5\") pod \"watcher-operator-controller-manager-667bd8d554-5x7z6\" (UID: \"f76d2d54-5cd6-4e5c-b719-92117a1e6cb9\") " pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.545893 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-lddjn" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.571702 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.571752 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.571805 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tt4ks\" (UniqueName: \"kubernetes.io/projected/31129f99-dd83-4b51-a741-5629f1f825fb-kube-api-access-tt4ks\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.571943 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkjmx\" (UniqueName: \"kubernetes.io/projected/b92a5136-09a9-49c1-ad89-bf46bccb9d45-kube-api-access-rkjmx\") pod \"rabbitmq-cluster-operator-manager-668c99d594-76vm7\" (UID: \"b92a5136-09a9-49c1-ad89-bf46bccb9d45\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-76vm7" Dec 10 11:03:35 crc kubenswrapper[4682]: E1210 11:03:35.572610 4682 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 11:03:35 crc kubenswrapper[4682]: E1210 11:03:35.572687 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs podName:31129f99-dd83-4b51-a741-5629f1f825fb nodeName:}" failed. No retries permitted until 2025-12-10 11:03:36.072640875 +0000 UTC m=+1096.392851625 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs") pod "openstack-operator-controller-manager-678c445b7b-gz66r" (UID: "31129f99-dd83-4b51-a741-5629f1f825fb") : secret "metrics-server-cert" not found Dec 10 11:03:35 crc kubenswrapper[4682]: E1210 11:03:35.572843 4682 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 11:03:35 crc kubenswrapper[4682]: E1210 11:03:35.572872 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs podName:31129f99-dd83-4b51-a741-5629f1f825fb nodeName:}" failed. No retries permitted until 2025-12-10 11:03:36.072863382 +0000 UTC m=+1096.393074142 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs") pod "openstack-operator-controller-manager-678c445b7b-gz66r" (UID: "31129f99-dd83-4b51-a741-5629f1f825fb") : secret "webhook-server-cert" not found Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.582782 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-lttng" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.624400 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tt4ks\" (UniqueName: \"kubernetes.io/projected/31129f99-dd83-4b51-a741-5629f1f825fb-kube-api-access-tt4ks\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.663631 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkjmx\" (UniqueName: \"kubernetes.io/projected/b92a5136-09a9-49c1-ad89-bf46bccb9d45-kube-api-access-rkjmx\") pod \"rabbitmq-cluster-operator-manager-668c99d594-76vm7\" (UID: \"b92a5136-09a9-49c1-ad89-bf46bccb9d45\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-76vm7" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.671237 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.695779 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdgsbh\" (UID: \"a4641319-ef96-4ffb-ac2e-a35154984ba8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:03:35 crc kubenswrapper[4682]: E1210 11:03:35.696314 4682 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:03:35 crc kubenswrapper[4682]: E1210 11:03:35.696365 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert podName:a4641319-ef96-4ffb-ac2e-a35154984ba8 nodeName:}" failed. No retries permitted until 2025-12-10 11:03:36.696347058 +0000 UTC m=+1097.016557808 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fdgsbh" (UID: "a4641319-ef96-4ffb-ac2e-a35154984ba8") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.728253 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.796262 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.816771 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6" Dec 10 11:03:35 crc kubenswrapper[4682]: I1210 11:03:35.888384 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-76vm7" Dec 10 11:03:36 crc kubenswrapper[4682]: I1210 11:03:36.115243 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:36 crc kubenswrapper[4682]: I1210 11:03:36.115298 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:36 crc kubenswrapper[4682]: E1210 11:03:36.115431 4682 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 11:03:36 crc kubenswrapper[4682]: E1210 11:03:36.115528 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs podName:31129f99-dd83-4b51-a741-5629f1f825fb nodeName:}" failed. No retries permitted until 2025-12-10 11:03:37.115463075 +0000 UTC m=+1097.435673825 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs") pod "openstack-operator-controller-manager-678c445b7b-gz66r" (UID: "31129f99-dd83-4b51-a741-5629f1f825fb") : secret "webhook-server-cert" not found Dec 10 11:03:36 crc kubenswrapper[4682]: E1210 11:03:36.115751 4682 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 11:03:36 crc kubenswrapper[4682]: E1210 11:03:36.115829 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs podName:31129f99-dd83-4b51-a741-5629f1f825fb nodeName:}" failed. No retries permitted until 2025-12-10 11:03:37.115807296 +0000 UTC m=+1097.436018096 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs") pod "openstack-operator-controller-manager-678c445b7b-gz66r" (UID: "31129f99-dd83-4b51-a741-5629f1f825fb") : secret "metrics-server-cert" not found Dec 10 11:03:36 crc kubenswrapper[4682]: I1210 11:03:36.230134 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert\") pod \"infra-operator-controller-manager-78d48bff9d-lffwd\" (UID: \"84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 11:03:36 crc kubenswrapper[4682]: E1210 11:03:36.230406 4682 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 10 11:03:36 crc kubenswrapper[4682]: E1210 11:03:36.230483 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert podName:84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f nodeName:}" failed. No retries permitted until 2025-12-10 11:03:38.230446041 +0000 UTC m=+1098.550656791 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert") pod "infra-operator-controller-manager-78d48bff9d-lffwd" (UID: "84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f") : secret "infra-operator-webhook-server-cert" not found Dec 10 11:03:36 crc kubenswrapper[4682]: I1210 11:03:36.407405 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-6lth6"] Dec 10 11:03:36 crc kubenswrapper[4682]: I1210 11:03:36.701563 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdgsbh\" (UID: \"a4641319-ef96-4ffb-ac2e-a35154984ba8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:03:36 crc kubenswrapper[4682]: E1210 11:03:36.702107 4682 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:03:36 crc kubenswrapper[4682]: E1210 11:03:36.702165 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert podName:a4641319-ef96-4ffb-ac2e-a35154984ba8 nodeName:}" failed. No retries permitted until 2025-12-10 11:03:38.70214423 +0000 UTC m=+1099.022354980 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fdgsbh" (UID: "a4641319-ef96-4ffb-ac2e-a35154984ba8") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.124359 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.124444 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:37 crc kubenswrapper[4682]: E1210 11:03:37.124587 4682 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 11:03:37 crc kubenswrapper[4682]: E1210 11:03:37.124658 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs podName:31129f99-dd83-4b51-a741-5629f1f825fb nodeName:}" failed. No retries permitted until 2025-12-10 11:03:39.124639645 +0000 UTC m=+1099.444850395 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs") pod "openstack-operator-controller-manager-678c445b7b-gz66r" (UID: "31129f99-dd83-4b51-a741-5629f1f825fb") : secret "metrics-server-cert" not found Dec 10 11:03:37 crc kubenswrapper[4682]: E1210 11:03:37.124706 4682 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 11:03:37 crc kubenswrapper[4682]: E1210 11:03:37.124770 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs podName:31129f99-dd83-4b51-a741-5629f1f825fb nodeName:}" failed. No retries permitted until 2025-12-10 11:03:39.124750998 +0000 UTC m=+1099.444961828 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs") pod "openstack-operator-controller-manager-678c445b7b-gz66r" (UID: "31129f99-dd83-4b51-a741-5629f1f825fb") : secret "webhook-server-cert" not found Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.273965 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl"] Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.351868 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz"] Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.368944 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-b28jt"] Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.425187 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-6lth6" event={"ID":"4985e1e4-e9fa-406a-a744-45d9e9dc8135","Type":"ContainerStarted","Data":"d380d128d803a854336772e8810bbb716e2fe71ee65acae3194b4fbb225f8894"} Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.437363 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" event={"ID":"ecddf494-21c9-4fe4-9431-a61d9bc6ba0d","Type":"ContainerStarted","Data":"eb1dace863f2526f958ec77e2f773ff9cd7501d41a1af6775445b75bfe7c7056"} Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.453356 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl" event={"ID":"41b81f6b-1509-4330-b9b7-8692c065e8d0","Type":"ContainerStarted","Data":"fdd52d12e7885f67d5e3eea7fcce3fe7845c98a366bd94d382a11f9c4f76a6bd"} Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.466009 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-b28jt" event={"ID":"35d24c54-906b-406e-b03e-9fe2008fbb10","Type":"ContainerStarted","Data":"f805a6e1bb800e6d0f31fabb9027007019a92ad62e0c0d70745d2312c04fca80"} Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.543190 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn"] Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.569795 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq"] Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.575809 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-26vbl"] Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.841541 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-76vm7"] Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.847588 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mnl7f"] Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.853120 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-9rc5v"] Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.893676 4682 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m"] Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.944141 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k"] Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.950281 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w"] Dec 10 11:03:37 crc kubenswrapper[4682]: W1210 11:03:37.968809 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2238ca5_7b77_471b_a743_75e076a61ce1.slice/crio-edbecacb65aa3e3db3e33b20a541b68cee50e92a0573a2f53260b2ad2ad307fc WatchSource:0}: Error finding container edbecacb65aa3e3db3e33b20a541b68cee50e92a0573a2f53260b2ad2ad307fc: Status 404 returned error can't find the container with id edbecacb65aa3e3db3e33b20a541b68cee50e92a0573a2f53260b2ad2ad307fc Dec 10 11:03:37 crc kubenswrapper[4682]: I1210 11:03:37.994063 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8"] Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.006067 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2"] Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.012133 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr"] Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.036799 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wnnd8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5b5fd79c9c-tv4q2_openstack-operators(c69a769f-919b-4cf6-9957-a4cdc2a8f8d7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.036859 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:3aa109bb973253ae9dcf339b9b65abbd1176cdb4be672c93e538a5f113816991,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-22ss4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-9d58d64bc-v9vrr_openstack-operators(028aa123-014b-4836-a8d8-e0acafea568f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 11:03:38 crc 
kubenswrapper[4682]: E1210 11:03:38.039635 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-22ss4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-9d58d64bc-v9vrr_openstack-operators(028aa123-014b-4836-a8d8-e0acafea568f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.039635 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wnnd8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5b5fd79c9c-tv4q2_openstack-operators(c69a769f-919b-4cf6-9957-a4cdc2a8f8d7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.040769 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed 
to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2" podUID="c69a769f-919b-4cf6-9957-a4cdc2a8f8d7" Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.040837 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr" podUID="028aa123-014b-4836-a8d8-e0acafea568f" Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.176933 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds"] Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.187947 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-lttng"] Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.195185 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-lddjn"] Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.201042 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6"] Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.212142 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8znd7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-lttng_openstack-operators(0d277084-bc96-4bcb-a090-76ef7e2f385e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.214977 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8znd7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-lttng_openstack-operators(0d277084-bc96-4bcb-a090-76ef7e2f385e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.216596 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/placement-operator-controller-manager-78f8948974-lttng" podUID="0d277084-bc96-4bcb-a090-76ef7e2f385e" Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.229430 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-glcmf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-fz6ds_openstack-operators(8f9fd772-c4a8-48d1-8294-c0572ad44506): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.233211 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6b3e0302608a2e70f9b5ae9167f6fbf59264f226d9db99d48f70466ab2f216b8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9t5x5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-667bd8d554-5x7z6_openstack-operators(f76d2d54-5cd6-4e5c-b719-92117a1e6cb9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.238015 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-glcmf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-fz6ds_openstack-operators(8f9fd772-c4a8-48d1-8294-c0572ad44506): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.238523 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m 
DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9t5x5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-667bd8d554-5x7z6_openstack-operators(f76d2d54-5cd6-4e5c-b719-92117a1e6cb9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.239722 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6" podUID="f76d2d54-5cd6-4e5c-b719-92117a1e6cb9" Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.239938 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds" podUID="8f9fd772-c4a8-48d1-8294-c0572ad44506" Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.265887 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert\") pod \"infra-operator-controller-manager-78d48bff9d-lffwd\" (UID: \"84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.266094 4682 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.266139 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert podName:84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f nodeName:}" failed. No retries permitted until 2025-12-10 11:03:42.266124231 +0000 UTC m=+1102.586334981 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert") pod "infra-operator-controller-manager-78d48bff9d-lffwd" (UID: "84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f") : secret "infra-operator-webhook-server-cert" not found Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.483097 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2" event={"ID":"c69a769f-919b-4cf6-9957-a4cdc2a8f8d7","Type":"ContainerStarted","Data":"4ec8b9a18af556275984ae07ff9da6d1b65accce61d438d3c556784506223451"} Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.484455 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mnl7f" event={"ID":"c3051489-ad76-489d-b143-a913219881da","Type":"ContainerStarted","Data":"0e45b563431b0916ecbb52661765dc36df1e047f900438bab49657b68b580079"} Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.485543 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-9rc5v" event={"ID":"0f6f4969-902f-44e4-a29e-fcb24ce0d7e4","Type":"ContainerStarted","Data":"50b5eac8d844fcd02b25bc353106e3c0ce282c5b6505299c711105e7eb73a710"} Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.486453 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2" podUID="c69a769f-919b-4cf6-9957-a4cdc2a8f8d7" Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.487156 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq" event={"ID":"79c97552-a229-4d38-ac96-79c2ef3303bf","Type":"ContainerStarted","Data":"f7769494929a791b8f58b1ceaedf97f01059feadf030ea1f19103b14ea7fba71"} Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.488622 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr" event={"ID":"028aa123-014b-4836-a8d8-e0acafea568f","Type":"ContainerStarted","Data":"cd8d55024a06ec4e034913c1f4f52c528c6bffce43884f1c604b276e4f4d77c8"} Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.489512 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:3aa109bb973253ae9dcf339b9b65abbd1176cdb4be672c93e538a5f113816991\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr" podUID="028aa123-014b-4836-a8d8-e0acafea568f" Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.489645 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn" 
event={"ID":"754f75d2-ce2a-4983-a82a-c62a2ffb2b04","Type":"ContainerStarted","Data":"fbe5ea92a920dc59ac5a799f1690ce5e1fee93efa4ce6ed0ca58d72bfc9e046e"} Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.491829 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k" event={"ID":"78994f55-53cc-46ce-a67f-8bcde14796f4","Type":"ContainerStarted","Data":"b57cc43a6f0130b11cb2a798fff7569b60095c7a6e0e5ab251bb274f132ee231"} Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.495186 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6" event={"ID":"f76d2d54-5cd6-4e5c-b719-92117a1e6cb9","Type":"ContainerStarted","Data":"bedcafd983d40e179c9c9ba9e784c4362f45a6ec86f7cf1cee4d51b655f517be"} Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.496832 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6b3e0302608a2e70f9b5ae9167f6fbf59264f226d9db99d48f70466ab2f216b8\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6" podUID="f76d2d54-5cd6-4e5c-b719-92117a1e6cb9" Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.497164 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m" event={"ID":"091fd04a-949b-4f31-8c04-80402b84ac36","Type":"ContainerStarted","Data":"72ea8873bc7a04230e97a6539ed41f179086dacc0d10e9d04b24b853cbfc19bd"} Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.512749 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds" event={"ID":"8f9fd772-c4a8-48d1-8294-c0572ad44506","Type":"ContainerStarted","Data":"b70894855d54a70e60672eccb7ea9846c029757dbd44ae0767b407f665d227b7"} Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.515935 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds" podUID="8f9fd772-c4a8-48d1-8294-c0572ad44506" Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.519191 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-lttng" event={"ID":"0d277084-bc96-4bcb-a090-76ef7e2f385e","Type":"ContainerStarted","Data":"adc6a187f5400178a8cf5543088dd5361d18a3072b9c84e774b48d06db1f1fbf"} Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.528168 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w" event={"ID":"c2238ca5-7b77-471b-a743-75e076a61ce1","Type":"ContainerStarted","Data":"edbecacb65aa3e3db3e33b20a541b68cee50e92a0573a2f53260b2ad2ad307fc"} Dec 10 11:03:38 crc 
kubenswrapper[4682]: I1210 11:03:38.530721 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-76vm7" event={"ID":"b92a5136-09a9-49c1-ad89-bf46bccb9d45","Type":"ContainerStarted","Data":"b7865cb456a7cf6bcb54c9a86ab14b448e711098a78fa761f46df7d95c74991a"} Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.531959 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-78f8948974-lttng" podUID="0d277084-bc96-4bcb-a090-76ef7e2f385e" Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.534946 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-lddjn" event={"ID":"3224e18d-9f3d-4c9c-abb9-eed4fa24989c","Type":"ContainerStarted","Data":"077b6949499db1fd1a345c94c4c2b5227792998b495ad879fefa6882e2fde6f0"} Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.536329 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8" event={"ID":"00c2e072-614d-483b-a9da-86f271a88095","Type":"ContainerStarted","Data":"0759b3784857bdeb3f03208911b08749f75ab21075542deb0c015647393a2e29"} Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.539835 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-26vbl" event={"ID":"539df8ed-9553-4ce0-be01-36055d2ab100","Type":"ContainerStarted","Data":"f60882bbefb922ffd500b61a0dcd37f575188ed32c91041b81a4fd35b75317b1"} Dec 10 11:03:38 crc kubenswrapper[4682]: I1210 11:03:38.773147 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdgsbh\" (UID: \"a4641319-ef96-4ffb-ac2e-a35154984ba8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.773400 4682 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:03:38 crc kubenswrapper[4682]: E1210 11:03:38.773569 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert podName:a4641319-ef96-4ffb-ac2e-a35154984ba8 nodeName:}" failed. No retries permitted until 2025-12-10 11:03:42.773541075 +0000 UTC m=+1103.093751826 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fdgsbh" (UID: "a4641319-ef96-4ffb-ac2e-a35154984ba8") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:03:39 crc kubenswrapper[4682]: I1210 11:03:39.181449 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:39 crc kubenswrapper[4682]: I1210 11:03:39.181855 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:39 crc kubenswrapper[4682]: E1210 11:03:39.181646 4682 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 11:03:39 crc kubenswrapper[4682]: E1210 11:03:39.182217 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs podName:31129f99-dd83-4b51-a741-5629f1f825fb nodeName:}" failed. No retries permitted until 2025-12-10 11:03:43.182133398 +0000 UTC m=+1103.502344198 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs") pod "openstack-operator-controller-manager-678c445b7b-gz66r" (UID: "31129f99-dd83-4b51-a741-5629f1f825fb") : secret "metrics-server-cert" not found Dec 10 11:03:39 crc kubenswrapper[4682]: E1210 11:03:39.182253 4682 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 11:03:39 crc kubenswrapper[4682]: E1210 11:03:39.182325 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs podName:31129f99-dd83-4b51-a741-5629f1f825fb nodeName:}" failed. No retries permitted until 2025-12-10 11:03:43.182303123 +0000 UTC m=+1103.502513923 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs") pod "openstack-operator-controller-manager-678c445b7b-gz66r" (UID: "31129f99-dd83-4b51-a741-5629f1f825fb") : secret "webhook-server-cert" not found Dec 10 11:03:39 crc kubenswrapper[4682]: E1210 11:03:39.565356 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds" podUID="8f9fd772-c4a8-48d1-8294-c0572ad44506" Dec 10 11:03:39 crc kubenswrapper[4682]: E1210 11:03:39.568594 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2" podUID="c69a769f-919b-4cf6-9957-a4cdc2a8f8d7" Dec 10 11:03:39 crc kubenswrapper[4682]: E1210 11:03:39.569035 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-78f8948974-lttng" podUID="0d277084-bc96-4bcb-a090-76ef7e2f385e" Dec 10 11:03:39 crc kubenswrapper[4682]: E1210 11:03:39.569120 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6b3e0302608a2e70f9b5ae9167f6fbf59264f226d9db99d48f70466ab2f216b8\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6" podUID="f76d2d54-5cd6-4e5c-b719-92117a1e6cb9" Dec 10 11:03:39 crc kubenswrapper[4682]: E1210 11:03:39.569229 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:3aa109bb973253ae9dcf339b9b65abbd1176cdb4be672c93e538a5f113816991\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr" podUID="028aa123-014b-4836-a8d8-e0acafea568f" Dec 10 11:03:42 crc kubenswrapper[4682]: I1210 
11:03:42.349811 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert\") pod \"infra-operator-controller-manager-78d48bff9d-lffwd\" (UID: \"84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 11:03:42 crc kubenswrapper[4682]: E1210 11:03:42.350080 4682 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 10 11:03:42 crc kubenswrapper[4682]: E1210 11:03:42.350591 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert podName:84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f nodeName:}" failed. No retries permitted until 2025-12-10 11:03:50.350563065 +0000 UTC m=+1110.670773975 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert") pod "infra-operator-controller-manager-78d48bff9d-lffwd" (UID: "84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f") : secret "infra-operator-webhook-server-cert" not found Dec 10 11:03:42 crc kubenswrapper[4682]: I1210 11:03:42.874158 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdgsbh\" (UID: \"a4641319-ef96-4ffb-ac2e-a35154984ba8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:03:42 crc kubenswrapper[4682]: E1210 11:03:42.874486 4682 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:03:42 crc kubenswrapper[4682]: E1210 11:03:42.874549 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert podName:a4641319-ef96-4ffb-ac2e-a35154984ba8 nodeName:}" failed. No retries permitted until 2025-12-10 11:03:50.87452748 +0000 UTC m=+1111.194738230 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fdgsbh" (UID: "a4641319-ef96-4ffb-ac2e-a35154984ba8") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 11:03:43 crc kubenswrapper[4682]: I1210 11:03:43.280210 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:43 crc kubenswrapper[4682]: I1210 11:03:43.280263 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:43 crc kubenswrapper[4682]: E1210 11:03:43.280365 4682 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 11:03:43 crc kubenswrapper[4682]: E1210 11:03:43.280440 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs podName:31129f99-dd83-4b51-a741-5629f1f825fb nodeName:}" failed. No retries permitted until 2025-12-10 11:03:51.280421575 +0000 UTC m=+1111.600632325 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs") pod "openstack-operator-controller-manager-678c445b7b-gz66r" (UID: "31129f99-dd83-4b51-a741-5629f1f825fb") : secret "metrics-server-cert" not found Dec 10 11:03:43 crc kubenswrapper[4682]: E1210 11:03:43.280503 4682 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 11:03:43 crc kubenswrapper[4682]: E1210 11:03:43.280589 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs podName:31129f99-dd83-4b51-a741-5629f1f825fb nodeName:}" failed. No retries permitted until 2025-12-10 11:03:51.28056485 +0000 UTC m=+1111.600775600 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs") pod "openstack-operator-controller-manager-678c445b7b-gz66r" (UID: "31129f99-dd83-4b51-a741-5629f1f825fb") : secret "webhook-server-cert" not found Dec 10 11:03:50 crc kubenswrapper[4682]: I1210 11:03:50.435908 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert\") pod \"infra-operator-controller-manager-78d48bff9d-lffwd\" (UID: \"84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 11:03:50 crc kubenswrapper[4682]: I1210 11:03:50.442169 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f-cert\") pod \"infra-operator-controller-manager-78d48bff9d-lffwd\" (UID: \"84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 11:03:50 crc kubenswrapper[4682]: I1210 11:03:50.621661 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 11:03:50 crc kubenswrapper[4682]: I1210 11:03:50.944068 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdgsbh\" (UID: \"a4641319-ef96-4ffb-ac2e-a35154984ba8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:03:50 crc kubenswrapper[4682]: I1210 11:03:50.947368 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a4641319-ef96-4ffb-ac2e-a35154984ba8-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdgsbh\" (UID: \"a4641319-ef96-4ffb-ac2e-a35154984ba8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:03:51 crc kubenswrapper[4682]: I1210 11:03:51.022297 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:03:51 crc kubenswrapper[4682]: I1210 11:03:51.348039 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:51 crc kubenswrapper[4682]: I1210 11:03:51.348083 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:03:51 crc kubenswrapper[4682]: E1210 11:03:51.348553 4682 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 11:03:51 crc kubenswrapper[4682]: E1210 11:03:51.348625 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs podName:31129f99-dd83-4b51-a741-5629f1f825fb nodeName:}" failed. No retries permitted until 2025-12-10 11:04:07.348609906 +0000 UTC m=+1127.668820656 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs") pod "openstack-operator-controller-manager-678c445b7b-gz66r" (UID: "31129f99-dd83-4b51-a741-5629f1f825fb") : secret "metrics-server-cert" not found Dec 10 11:03:51 crc kubenswrapper[4682]: E1210 11:03:51.348569 4682 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 11:03:51 crc kubenswrapper[4682]: E1210 11:03:51.348696 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs podName:31129f99-dd83-4b51-a741-5629f1f825fb nodeName:}" failed. No retries permitted until 2025-12-10 11:04:07.348684658 +0000 UTC m=+1127.668895408 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs") pod "openstack-operator-controller-manager-678c445b7b-gz66r" (UID: "31129f99-dd83-4b51-a741-5629f1f825fb") : secret "webhook-server-cert" not found Dec 10 11:03:52 crc kubenswrapper[4682]: E1210 11:03:52.200755 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:5bdb3685be3ddc1efd62e16aaf2fa96ead64315e26d52b1b2a7d8ac01baa1e87" Dec 10 11:03:52 crc kubenswrapper[4682]: E1210 11:03:52.201212 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:5bdb3685be3ddc1efd62e16aaf2fa96ead64315e26d52b1b2a7d8ac01baa1e87,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kdwvx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-967d97867-cxbm8_openstack-operators(00c2e072-614d-483b-a9da-86f271a88095): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:03:52 crc kubenswrapper[4682]: E1210 11:03:52.784097 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59" Dec 10 11:03:52 crc 
kubenswrapper[4682]: E1210 11:03:52.784358 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wnd7g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-5k89m_openstack-operators(091fd04a-949b-4f31-8c04-80402b84ac36): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:03:52 crc kubenswrapper[4682]: E1210 11:03:52.932718 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.143:5001/openstack-k8s-operators/telemetry-operator:d352973ce9a498b9e2a14f554e860795ca5bcdcf" Dec 10 11:03:52 crc kubenswrapper[4682]: E1210 11:03:52.932782 4682 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.143:5001/openstack-k8s-operators/telemetry-operator:d352973ce9a498b9e2a14f554e860795ca5bcdcf" Dec 10 11:03:52 crc kubenswrapper[4682]: E1210 11:03:52.932946 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.143:5001/openstack-k8s-operators/telemetry-operator:d352973ce9a498b9e2a14f554e860795ca5bcdcf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-md57n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-54d54d59bc-cjf8w_openstack-operators(c2238ca5-7b77-471b-a743-75e076a61ce1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:03:54 crc kubenswrapper[4682]: E1210 11:03:54.548938 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Dec 10 11:03:54 crc kubenswrapper[4682]: E1210 11:03:54.549530 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 
200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rkjmx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-76vm7_openstack-operators(b92a5136-09a9-49c1-ad89-bf46bccb9d45): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:03:54 crc kubenswrapper[4682]: E1210 11:03:54.550791 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-76vm7" podUID="b92a5136-09a9-49c1-ad89-bf46bccb9d45" Dec 10 11:03:54 crc kubenswrapper[4682]: E1210 11:03:54.652289 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-76vm7" podUID="b92a5136-09a9-49c1-ad89-bf46bccb9d45" Dec 10 11:03:55 crc kubenswrapper[4682]: E1210 11:03:55.047437 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7" Dec 10 11:03:55 crc kubenswrapper[4682]: E1210 11:03:55.047632 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-r5l7s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7765d96ddf-mkfhq_openstack-operators(79c97552-a229-4d38-ac96-79c2ef3303bf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:04:02 crc kubenswrapper[4682]: I1210 11:04:02.522200 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd"] Dec 10 11:04:02 crc kubenswrapper[4682]: I1210 11:04:02.568926 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh"] Dec 10 11:04:03 crc kubenswrapper[4682]: I1210 11:04:03.105124 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn" event={"ID":"754f75d2-ce2a-4983-a82a-c62a2ffb2b04","Type":"ContainerStarted","Data":"0ff181f303e773ca8dcfc3d5d576b92aa27955094f7127e937351c4fb651e603"} Dec 10 11:04:03 crc kubenswrapper[4682]: I1210 11:04:03.108099 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" event={"ID":"ecddf494-21c9-4fe4-9431-a61d9bc6ba0d","Type":"ContainerStarted","Data":"b1da1062ccf53cc72d508cc7d35abf25249b389629a32cf988af3a4e1c994422"} Dec 10 11:04:03 crc kubenswrapper[4682]: I1210 11:04:03.115589 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl" event={"ID":"41b81f6b-1509-4330-b9b7-8692c065e8d0","Type":"ContainerStarted","Data":"87991d9dbf14201c5b539c786a3d206bb5d59e22792282adf367f51cb3db5ca6"} Dec 10 11:04:03 crc kubenswrapper[4682]: I1210 11:04:03.130765 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k" event={"ID":"78994f55-53cc-46ce-a67f-8bcde14796f4","Type":"ContainerStarted","Data":"65e6ce63fc0a0bf0ca12c174e48899897e3c9664985a3f6d4c3c4d7e225c6a04"} Dec 10 11:04:04 crc kubenswrapper[4682]: I1210 11:04:04.245649 
4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" event={"ID":"a4641319-ef96-4ffb-ac2e-a35154984ba8","Type":"ContainerStarted","Data":"d4d4dee6b9d2aab4e1573f138702e2f5b096a22f01230fd6a3c2494ec14c7c54"} Dec 10 11:04:04 crc kubenswrapper[4682]: I1210 11:04:04.252117 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-6lth6" event={"ID":"4985e1e4-e9fa-406a-a744-45d9e9dc8135","Type":"ContainerStarted","Data":"39d51b866e185c5c901ebce5778b622fe6d501fc06b1e43d0d83fb96bde26a69"} Dec 10 11:04:04 crc kubenswrapper[4682]: I1210 11:04:04.253067 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" event={"ID":"84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f","Type":"ContainerStarted","Data":"69ffd32e4f00983ba0dc997df58107b5156120b91eaa8b0a1eb4b82ea78fc717"} Dec 10 11:04:04 crc kubenswrapper[4682]: I1210 11:04:04.254449 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-26vbl" event={"ID":"539df8ed-9553-4ce0-be01-36055d2ab100","Type":"ContainerStarted","Data":"4cffc014de661d0e12df21df12d870c08258c0495a8ee0628f5d1deebc7a6293"} Dec 10 11:04:06 crc kubenswrapper[4682]: I1210 11:04:06.478464 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:04:06 crc kubenswrapper[4682]: I1210 11:04:06.478865 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:04:07 crc kubenswrapper[4682]: I1210 11:04:07.352769 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:04:07 crc kubenswrapper[4682]: I1210 11:04:07.352831 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:04:07 crc kubenswrapper[4682]: I1210 11:04:07.360534 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-webhook-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:04:07 crc kubenswrapper[4682]: I1210 11:04:07.361323 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"metrics-certs\" (UniqueName: \"kubernetes.io/secret/31129f99-dd83-4b51-a741-5629f1f825fb-metrics-certs\") pod \"openstack-operator-controller-manager-678c445b7b-gz66r\" (UID: \"31129f99-dd83-4b51-a741-5629f1f825fb\") " pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:04:07 crc kubenswrapper[4682]: I1210 11:04:07.649360 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:04:13 crc kubenswrapper[4682]: I1210 11:04:13.322103 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-b28jt" event={"ID":"35d24c54-906b-406e-b03e-9fe2008fbb10","Type":"ContainerStarted","Data":"9d18ca8e7b96261806d473f9b169834e7280fdcc131b6b6d5652f47087309a8b"} Dec 10 11:04:16 crc kubenswrapper[4682]: I1210 11:04:16.515380 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mnl7f" event={"ID":"c3051489-ad76-489d-b143-a913219881da","Type":"ContainerStarted","Data":"89060f576befd05eedd3309200f98d888d2783c66faeea51b63ade4f48777a0c"} Dec 10 11:04:16 crc kubenswrapper[4682]: I1210 11:04:16.519566 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-lddjn" event={"ID":"3224e18d-9f3d-4c9c-abb9-eed4fa24989c","Type":"ContainerStarted","Data":"c2a7b8c3b6167cb3b70d93006143d72c7537727a37d3be02b5fc60aa1ab25f48"} Dec 10 11:04:17 crc kubenswrapper[4682]: I1210 11:04:17.546966 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-lttng" event={"ID":"0d277084-bc96-4bcb-a090-76ef7e2f385e","Type":"ContainerStarted","Data":"f4801049b80d1eafbd40eed3278d013b392a68b3b37c664b9a31e964093f58cd"} Dec 10 11:04:18 crc kubenswrapper[4682]: E1210 11:04:18.033434 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 10 11:04:18 crc kubenswrapper[4682]: E1210 11:04:18.033853 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hfvqr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-697fb699cf-ldjzz_openstack-operators(ecddf494-21c9-4fe4-9431-a61d9bc6ba0d): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Dec 10 11:04:18 crc kubenswrapper[4682]: E1210 11:04:18.035244 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" podUID="ecddf494-21c9-4fe4-9431-a61d9bc6ba0d" Dec 10 11:04:18 crc kubenswrapper[4682]: I1210 11:04:18.607175 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-9rc5v" event={"ID":"0f6f4969-902f-44e4-a29e-fcb24ce0d7e4","Type":"ContainerStarted","Data":"1776c60d6e9327f47fc181b2f079c72e7039b4109f3313900f7382a09c783cb5"} Dec 10 11:04:18 crc kubenswrapper[4682]: I1210 11:04:18.611310 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr" event={"ID":"028aa123-014b-4836-a8d8-e0acafea568f","Type":"ContainerStarted","Data":"4895364d3730ae7d37182f416d75006a1633bc768ae5c87a55db09159b8164b1"} Dec 10 11:04:18 crc kubenswrapper[4682]: I1210 11:04:18.611616 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" Dec 10 11:04:18 crc kubenswrapper[4682]: E1210 11:04:18.612498 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" podUID="ecddf494-21c9-4fe4-9431-a61d9bc6ba0d" Dec 10 11:04:18 crc kubenswrapper[4682]: I1210 11:04:18.614332 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" Dec 10 11:04:19 crc kubenswrapper[4682]: E1210 11:04:19.623217 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" podUID="ecddf494-21c9-4fe4-9431-a61d9bc6ba0d" Dec 10 11:04:19 crc kubenswrapper[4682]: I1210 11:04:19.936964 4682 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r"] Dec 10 11:04:20 crc kubenswrapper[4682]: I1210 11:04:20.634288 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2" event={"ID":"c69a769f-919b-4cf6-9957-a4cdc2a8f8d7","Type":"ContainerStarted","Data":"f9b5a9b7266c6f5cace6008930f9210aaa467a874a6e1a71a6786cabf7f0e668"} Dec 10 11:04:20 crc kubenswrapper[4682]: I1210 11:04:20.636487 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds" event={"ID":"8f9fd772-c4a8-48d1-8294-c0572ad44506","Type":"ContainerStarted","Data":"bc2fd932d0396a5c7b149945736f167e23e0f89cdf1657900a0baa5756204eec"} Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.638049 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" podUID="ecddf494-21c9-4fe4-9431-a61d9bc6ba0d" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.779400 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.779543 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8f2kh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-6c677c69b-j9rxl_openstack-operators(41b81f6b-1509-4330-b9b7-8692c065e8d0): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.780664 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.780664 
4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl" podUID="41b81f6b-1509-4330-b9b7-8692c065e8d0" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.780813 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-l9jln,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-79c8c4686c-5jw7k_openstack-operators(78994f55-53cc-46ce-a67f-8bcde14796f4): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.782439 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k" podUID="78994f55-53cc-46ce-a67f-8bcde14796f4" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.786179 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying layer: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.786356 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4nlcw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-68c6d99b8f-s7vjn_openstack-operators(754f75d2-ce2a-4983-a82a-c62a2ffb2b04): ErrImagePull: rpc error: code = Canceled desc = copying layer: context canceled" logger="UnhandledError" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.787519 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying layer: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn" podUID="754f75d2-ce2a-4983-a82a-c62a2ffb2b04" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.792858 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.793025 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-r5l7s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7765d96ddf-mkfhq_openstack-operators(79c97552-a229-4d38-ac96-79c2ef3303bf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:04:20 crc 
kubenswrapper[4682]: E1210 11:04:20.794183 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq" podUID="79c97552-a229-4d38-ac96-79c2ef3303bf" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.807767 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.807898 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kdwvx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-967d97867-cxbm8_openstack-operators(00c2e072-614d-483b-a9da-86f271a88095): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.809001 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8" podUID="00c2e072-614d-483b-a9da-86f271a88095" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.835299 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.835486 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-md57n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-54d54d59bc-cjf8w_openstack-operators(c2238ca5-7b77-471b-a743-75e076a61ce1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:04:20 crc kubenswrapper[4682]: E1210 11:04:20.836938 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w" podUID="c2238ca5-7b77-471b-a743-75e076a61ce1" Dec 10 11:04:21 crc kubenswrapper[4682]: I1210 11:04:21.665373 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-6lth6" event={"ID":"4985e1e4-e9fa-406a-a744-45d9e9dc8135","Type":"ContainerStarted","Data":"71a7969c23b4a4e0738618f9d3bbcbb53a8bf9280df6aaa48023b27f99ffbf78"} Dec 10 11:04:21 crc kubenswrapper[4682]: I1210 11:04:21.667769 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6" event={"ID":"f76d2d54-5cd6-4e5c-b719-92117a1e6cb9","Type":"ContainerStarted","Data":"55f7c3bc7889e72e11dadaa8c404c3de3e745fc7c827c1dc5c28771504aa8ec4"} Dec 10 11:04:21 crc kubenswrapper[4682]: I1210 11:04:21.670575 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" event={"ID":"31129f99-dd83-4b51-a741-5629f1f825fb","Type":"ContainerStarted","Data":"8f9877282a4e9ceb3dde166868c47658161a9b2ea0799d2541bbe3f8ec0d258d"} Dec 10 11:04:21 crc kubenswrapper[4682]: I1210 11:04:21.670605 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" 
event={"ID":"31129f99-dd83-4b51-a741-5629f1f825fb","Type":"ContainerStarted","Data":"c04e8f382c396873b7a091b56cfd0de0d0d795d937ce288c59230abba66094bc"} Dec 10 11:04:21 crc kubenswrapper[4682]: I1210 11:04:21.671323 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn" Dec 10 11:04:21 crc kubenswrapper[4682]: I1210 11:04:21.671362 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl" Dec 10 11:04:21 crc kubenswrapper[4682]: I1210 11:04:21.673307 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn" Dec 10 11:04:21 crc kubenswrapper[4682]: I1210 11:04:21.674367 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl" Dec 10 11:04:22 crc kubenswrapper[4682]: E1210 11:04:22.375713 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m" podUID="091fd04a-949b-4f31-8c04-80402b84ac36" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.687612 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" event={"ID":"a4641319-ef96-4ffb-ac2e-a35154984ba8","Type":"ContainerStarted","Data":"b5b5018b4072cc79a48ad0d251c5d7bb9ee9e92353be8d8a52e807dfa2380f8f"} Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.693695 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m" event={"ID":"091fd04a-949b-4f31-8c04-80402b84ac36","Type":"ContainerStarted","Data":"e86afbeea732cac0d2e2b10ea9fc196f9473c47be5977d56fd492f910210c413"} Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.698795 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mnl7f" event={"ID":"c3051489-ad76-489d-b143-a913219881da","Type":"ContainerStarted","Data":"83e03a15dd70c5cdf1b8bfe911bdad6a242b814a89d8139698e35a33d7fb426e"} Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.698990 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mnl7f" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.700567 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds" event={"ID":"8f9fd772-c4a8-48d1-8294-c0572ad44506","Type":"ContainerStarted","Data":"b5cf94899cb566ac7d6b263a980e0fb5c880728ad93a52d9bf0ad3b34fab9317"} Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.700926 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.701137 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mnl7f" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.703959 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl" event={"ID":"41b81f6b-1509-4330-b9b7-8692c065e8d0","Type":"ContainerStarted","Data":"9df12fce8a43c20b34d45eac451bb15a7c6d03e1d34b18529ef54359c20e1762"} Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.750593 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k" event={"ID":"78994f55-53cc-46ce-a67f-8bcde14796f4","Type":"ContainerStarted","Data":"6f5b0dbda744a63b9a33dbb6150c69a0185f21d78aeda818dbda7f8756a3db62"} Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.751550 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.768869 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-26vbl" event={"ID":"539df8ed-9553-4ce0-be01-36055d2ab100","Type":"ContainerStarted","Data":"0eb46a72f339a945889a153f84acace2a4a379baf190f14e838c69044398d179"} Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.771123 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-26vbl" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.772648 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.776692 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-26vbl" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.790752 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-j9rxl" podStartSLOduration=32.371708615 podStartE2EDuration="48.790737218s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:37.291299963 +0000 UTC m=+1097.611510713" lastFinishedPulling="2025-12-10 11:03:53.710328566 +0000 UTC m=+1114.030539316" observedRunningTime="2025-12-10 11:04:22.755498813 +0000 UTC m=+1143.075709583" watchObservedRunningTime="2025-12-10 11:04:22.790737218 +0000 UTC m=+1143.110947968" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.791201 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds" podStartSLOduration=4.956259087 podStartE2EDuration="47.791196263s" podCreationTimestamp="2025-12-10 11:03:35 +0000 UTC" firstStartedPulling="2025-12-10 11:03:38.229285429 +0000 UTC m=+1098.549496179" lastFinishedPulling="2025-12-10 11:04:21.064222605 +0000 UTC m=+1141.384433355" observedRunningTime="2025-12-10 11:04:22.784821111 +0000 UTC m=+1143.105031861" watchObservedRunningTime="2025-12-10 11:04:22.791196263 +0000 UTC m=+1143.111407013" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.818152 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-mnl7f" podStartSLOduration=5.622383765 podStartE2EDuration="48.818136526s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:37.850920348 +0000 UTC m=+1098.171131098" lastFinishedPulling="2025-12-10 
11:04:21.046673099 +0000 UTC m=+1141.366883859" observedRunningTime="2025-12-10 11:04:22.81635811 +0000 UTC m=+1143.136568880" watchObservedRunningTime="2025-12-10 11:04:22.818136526 +0000 UTC m=+1143.138347276" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.827448 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2" event={"ID":"c69a769f-919b-4cf6-9957-a4cdc2a8f8d7","Type":"ContainerStarted","Data":"e9d001937f4738a614313d37f7af9ac8d5feafdd58dcec6b3968ba4468c77c3d"} Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.828169 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.936765 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-lttng" event={"ID":"0d277084-bc96-4bcb-a090-76ef7e2f385e","Type":"ContainerStarted","Data":"57efd7843df5d1f1bbf531aae61036f5f4dc655feb1b032a16d1e2d5b2141779"} Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.937513 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-78f8948974-lttng" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.944865 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5jw7k" podStartSLOduration=33.196278504 podStartE2EDuration="48.944847209s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:37.958323143 +0000 UTC m=+1098.278533893" lastFinishedPulling="2025-12-10 11:03:53.706891848 +0000 UTC m=+1114.027102598" observedRunningTime="2025-12-10 11:04:22.860812328 +0000 UTC m=+1143.181023098" watchObservedRunningTime="2025-12-10 11:04:22.944847209 +0000 UTC m=+1143.265057959" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.948221 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-26vbl" podStartSLOduration=5.410383365 podStartE2EDuration="48.948203116s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:37.56131464 +0000 UTC m=+1097.881525390" lastFinishedPulling="2025-12-10 11:04:21.099134391 +0000 UTC m=+1141.419345141" observedRunningTime="2025-12-10 11:04:22.944130116 +0000 UTC m=+1143.264340866" watchObservedRunningTime="2025-12-10 11:04:22.948203116 +0000 UTC m=+1143.268413866" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.950744 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-78f8948974-lttng" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.951493 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" event={"ID":"84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f","Type":"ContainerStarted","Data":"ec679ef73a5981904ed2c60729a5b83b587a24250d89beebc964b70e5e75ff68"} Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.952876 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr" 
event={"ID":"028aa123-014b-4836-a8d8-e0acafea568f","Type":"ContainerStarted","Data":"3b31cd305d6cfe9da385525d20e1df74817eca289c7616e0f09923dbfdd8982a"} Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.972877 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.982719 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-b28jt" event={"ID":"35d24c54-906b-406e-b03e-9fe2008fbb10","Type":"ContainerStarted","Data":"baf29193cf4f8ec684ed88c79ceb5887993d6134ee448db8fe2d7a2d4802e483"} Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.983592 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-b28jt" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.986716 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-lddjn" event={"ID":"3224e18d-9f3d-4c9c-abb9-eed4fa24989c","Type":"ContainerStarted","Data":"e3d31c972a64c0734a816622196a799dde2d8f2d8f516606ba0fb75d1bc200fc"} Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.987883 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-998648c74-lddjn" Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.990236 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6" event={"ID":"f76d2d54-5cd6-4e5c-b719-92117a1e6cb9","Type":"ContainerStarted","Data":"0578841eda215d1958adde0e4f7a23ec7476200d61a4b39e6d2a769a4bc4f56e"} Dec 10 11:04:22 crc kubenswrapper[4682]: I1210 11:04:22.990800 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:22.993859 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-998648c74-lddjn" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:22.994081 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-b28jt" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.023382 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn" event={"ID":"754f75d2-ce2a-4983-a82a-c62a2ffb2b04","Type":"ContainerStarted","Data":"92c5d2b2797aa5655f04e8a98ffd376125f47620adece150b0996c4bd5468236"} Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.032662 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-78f8948974-lttng" podStartSLOduration=6.143312879 podStartE2EDuration="49.03263652s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:38.211970198 +0000 UTC m=+1098.532180948" lastFinishedPulling="2025-12-10 11:04:21.101293839 +0000 UTC m=+1141.421504589" observedRunningTime="2025-12-10 11:04:22.981824711 +0000 UTC m=+1143.302035471" watchObservedRunningTime="2025-12-10 11:04:23.03263652 +0000 UTC m=+1143.352847280" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.064352 4682 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-9rc5v" event={"ID":"0f6f4969-902f-44e4-a29e-fcb24ce0d7e4","Type":"ContainerStarted","Data":"e4c8ead2ac7025f4ad43002042b88487e73539b8f2e7daf80501254fcbf8bb3d"} Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.064402 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-9rc5v" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.077846 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-9rc5v" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.136611 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2" podStartSLOduration=6.124000686 podStartE2EDuration="49.136581782s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:38.036666685 +0000 UTC m=+1098.356877435" lastFinishedPulling="2025-12-10 11:04:21.049247781 +0000 UTC m=+1141.369458531" observedRunningTime="2025-12-10 11:04:23.045461976 +0000 UTC m=+1143.365672736" watchObservedRunningTime="2025-12-10 11:04:23.136581782 +0000 UTC m=+1143.456792532" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.144052 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-b28jt" podStartSLOduration=5.359236769 podStartE2EDuration="49.144034508s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:37.372065051 +0000 UTC m=+1097.692275801" lastFinishedPulling="2025-12-10 11:04:21.15686279 +0000 UTC m=+1141.477073540" observedRunningTime="2025-12-10 11:04:23.077411738 +0000 UTC m=+1143.397622508" watchObservedRunningTime="2025-12-10 11:04:23.144034508 +0000 UTC m=+1143.464245258" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.147178 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-76vm7" event={"ID":"b92a5136-09a9-49c1-ad89-bf46bccb9d45","Type":"ContainerStarted","Data":"c1d6e3e344bfe335ae80a9bbc6b02dce5287180a5fc4de63bcbf54eb264d92cd"} Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.147216 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.148720 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-6lth6" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.153107 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-6lth6" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.225888 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-998648c74-lddjn" podStartSLOduration=6.337009302 podStartE2EDuration="49.225864829s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:38.211683559 +0000 UTC m=+1098.531894299" lastFinishedPulling="2025-12-10 11:04:21.100539076 +0000 UTC m=+1141.420749826" observedRunningTime="2025-12-10 11:04:23.207148427 
+0000 UTC m=+1143.527359177" watchObservedRunningTime="2025-12-10 11:04:23.225864829 +0000 UTC m=+1143.546075599" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.243366 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6" podStartSLOduration=22.863535817 podStartE2EDuration="48.243348363s" podCreationTimestamp="2025-12-10 11:03:35 +0000 UTC" firstStartedPulling="2025-12-10 11:03:38.23307864 +0000 UTC m=+1098.553289390" lastFinishedPulling="2025-12-10 11:04:03.612891186 +0000 UTC m=+1123.933101936" observedRunningTime="2025-12-10 11:04:23.239056068 +0000 UTC m=+1143.559266828" watchObservedRunningTime="2025-12-10 11:04:23.243348363 +0000 UTC m=+1143.563559113" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.373328 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr" podStartSLOduration=6.30881094 podStartE2EDuration="49.37330831s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:38.036672185 +0000 UTC m=+1098.356882935" lastFinishedPulling="2025-12-10 11:04:21.101169555 +0000 UTC m=+1141.421380305" observedRunningTime="2025-12-10 11:04:23.358822011 +0000 UTC m=+1143.679032761" watchObservedRunningTime="2025-12-10 11:04:23.37330831 +0000 UTC m=+1143.693519060" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.375905 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-s7vjn" podStartSLOduration=32.482884636 podStartE2EDuration="49.375894422s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:37.511427384 +0000 UTC m=+1097.831638134" lastFinishedPulling="2025-12-10 11:03:54.40443717 +0000 UTC m=+1114.724647920" observedRunningTime="2025-12-10 11:04:23.291739016 +0000 UTC m=+1143.611949786" watchObservedRunningTime="2025-12-10 11:04:23.375894422 +0000 UTC m=+1143.696105172" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.422959 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-9rc5v" podStartSLOduration=6.219026425 podStartE2EDuration="49.422939121s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:37.872501215 +0000 UTC m=+1098.192711965" lastFinishedPulling="2025-12-10 11:04:21.076413901 +0000 UTC m=+1141.396624661" observedRunningTime="2025-12-10 11:04:23.397681712 +0000 UTC m=+1143.717892492" watchObservedRunningTime="2025-12-10 11:04:23.422939121 +0000 UTC m=+1143.743149871" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.454444 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" podStartSLOduration=48.454424289 podStartE2EDuration="48.454424289s" podCreationTimestamp="2025-12-10 11:03:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:04:23.444260556 +0000 UTC m=+1143.764471306" watchObservedRunningTime="2025-12-10 11:04:23.454424289 +0000 UTC m=+1143.774635039" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.473216 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-76vm7" podStartSLOduration=8.014853602 podStartE2EDuration="48.473195633s" podCreationTimestamp="2025-12-10 11:03:35 +0000 UTC" firstStartedPulling="2025-12-10 11:03:37.84280604 +0000 UTC m=+1098.163016780" lastFinishedPulling="2025-12-10 11:04:18.301148061 +0000 UTC m=+1138.621358811" observedRunningTime="2025-12-10 11:04:23.470846219 +0000 UTC m=+1143.791056969" watchObservedRunningTime="2025-12-10 11:04:23.473195633 +0000 UTC m=+1143.793406383" Dec 10 11:04:23 crc kubenswrapper[4682]: I1210 11:04:23.515930 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-6lth6" podStartSLOduration=5.036530566 podStartE2EDuration="49.515914417s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:36.622618941 +0000 UTC m=+1096.942829701" lastFinishedPulling="2025-12-10 11:04:21.102002802 +0000 UTC m=+1141.422213552" observedRunningTime="2025-12-10 11:04:23.514344516 +0000 UTC m=+1143.834555286" watchObservedRunningTime="2025-12-10 11:04:23.515914417 +0000 UTC m=+1143.836125167" Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.167157 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq" event={"ID":"79c97552-a229-4d38-ac96-79c2ef3303bf","Type":"ContainerStarted","Data":"4b06f20e9f599d3ca40c18a084af4d57b4c5fe6a72efbfbd2ce09f00ea5bfda6"} Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.167203 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq" event={"ID":"79c97552-a229-4d38-ac96-79c2ef3303bf","Type":"ContainerStarted","Data":"b65e73edc94c70bd9e83e3b44283b6ee7ec858eb799600338bd54ec717f21186"} Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.167360 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq" Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.168916 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" event={"ID":"84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f","Type":"ContainerStarted","Data":"31a077afa9ddb2a4eb89f2c2a24292bfbbe876457b4ead73fc06ee24d047d6e8"} Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.169011 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.170596 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8" event={"ID":"00c2e072-614d-483b-a9da-86f271a88095","Type":"ContainerStarted","Data":"0890f41ec7e1a43909192826fbfbc9a5a0191c449b8132efdfa8a56c52e695a7"} Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.170628 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8" event={"ID":"00c2e072-614d-483b-a9da-86f271a88095","Type":"ContainerStarted","Data":"d59a693671336d664775ac55653bc7ee448f2046ab301142a6375606afaaa4aa"} Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.170761 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8" 
Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.172562 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" event={"ID":"a4641319-ef96-4ffb-ac2e-a35154984ba8","Type":"ContainerStarted","Data":"3e29ae8a2dcbe23df4d093608dcf7c9551964516e23c22bd79865fe42d1c6c84"} Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.172661 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.175057 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m" event={"ID":"091fd04a-949b-4f31-8c04-80402b84ac36","Type":"ContainerStarted","Data":"1d99f8c1eb30679ea0b1906544b90dbed99978af18e4ac9154b36e02a88ecc3e"} Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.175149 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m" Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.179035 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w" event={"ID":"c2238ca5-7b77-471b-a743-75e076a61ce1","Type":"ContainerStarted","Data":"5e8baba37c3cebae78ecd527818691b23d1ae4fc2a07278646fe9880e64eec42"} Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.179076 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w" event={"ID":"c2238ca5-7b77-471b-a743-75e076a61ce1","Type":"ContainerStarted","Data":"15e3471ac31cc36d213480dd5d7b4138a451454790e2c0b5dd603ca4441b0337"} Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.181578 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-tv4q2" Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.183698 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-v9vrr" Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.186097 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5854674fcc-fz6ds" Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.199784 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq" podStartSLOduration=5.036504256 podStartE2EDuration="50.199767266s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:37.511023541 +0000 UTC m=+1097.831234291" lastFinishedPulling="2025-12-10 11:04:22.674286551 +0000 UTC m=+1142.994497301" observedRunningTime="2025-12-10 11:04:24.195664336 +0000 UTC m=+1144.515875086" watchObservedRunningTime="2025-12-10 11:04:24.199767266 +0000 UTC m=+1144.519978016" Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.319042 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m" podStartSLOduration=4.736355829 podStartE2EDuration="50.319025943s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:37.907369543 +0000 UTC m=+1098.227580293" 
lastFinishedPulling="2025-12-10 11:04:23.490039657 +0000 UTC m=+1143.810250407" observedRunningTime="2025-12-10 11:04:24.313341163 +0000 UTC m=+1144.633551913" watchObservedRunningTime="2025-12-10 11:04:24.319025943 +0000 UTC m=+1144.639236683" Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.373613 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8" podStartSLOduration=5.804914848 podStartE2EDuration="50.373595752s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:38.019515539 +0000 UTC m=+1098.339726289" lastFinishedPulling="2025-12-10 11:04:22.588196443 +0000 UTC m=+1142.908407193" observedRunningTime="2025-12-10 11:04:24.343275281 +0000 UTC m=+1144.663486031" watchObservedRunningTime="2025-12-10 11:04:24.373595752 +0000 UTC m=+1144.693806502" Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.418763 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w" podStartSLOduration=5.939460043 podStartE2EDuration="50.418745061s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:37.977600826 +0000 UTC m=+1098.297811576" lastFinishedPulling="2025-12-10 11:04:22.456885844 +0000 UTC m=+1142.777096594" observedRunningTime="2025-12-10 11:04:24.415195369 +0000 UTC m=+1144.735406119" watchObservedRunningTime="2025-12-10 11:04:24.418745061 +0000 UTC m=+1144.738955801" Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.436577 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" podStartSLOduration=32.7640027 podStartE2EDuration="50.436558456s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:04:03.10469503 +0000 UTC m=+1123.424905780" lastFinishedPulling="2025-12-10 11:04:20.777250786 +0000 UTC m=+1141.097461536" observedRunningTime="2025-12-10 11:04:24.432432195 +0000 UTC m=+1144.752642935" watchObservedRunningTime="2025-12-10 11:04:24.436558456 +0000 UTC m=+1144.756769206" Dec 10 11:04:24 crc kubenswrapper[4682]: I1210 11:04:24.476663 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" podStartSLOduration=32.654631827 podStartE2EDuration="50.476647736s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:04:03.104835395 +0000 UTC m=+1123.425046145" lastFinishedPulling="2025-12-10 11:04:20.926851304 +0000 UTC m=+1141.247062054" observedRunningTime="2025-12-10 11:04:24.467536207 +0000 UTC m=+1144.787746957" watchObservedRunningTime="2025-12-10 11:04:24.476647736 +0000 UTC m=+1144.796858486" Dec 10 11:04:25 crc kubenswrapper[4682]: I1210 11:04:25.728572 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w" Dec 10 11:04:27 crc kubenswrapper[4682]: I1210 11:04:27.655904 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-678c445b7b-gz66r" Dec 10 11:04:30 crc kubenswrapper[4682]: I1210 11:04:30.630716 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-lffwd" Dec 10 
11:04:31 crc kubenswrapper[4682]: I1210 11:04:31.028940 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdgsbh" Dec 10 11:04:34 crc kubenswrapper[4682]: I1210 11:04:34.295268 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" event={"ID":"ecddf494-21c9-4fe4-9431-a61d9bc6ba0d","Type":"ContainerStarted","Data":"57cc8a76657b9b5d39e6202a5bee02bb55cfd62cd6637e71013bc3dffbbadd23"} Dec 10 11:04:34 crc kubenswrapper[4682]: I1210 11:04:34.313924 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-ldjzz" podStartSLOduration=43.279323218 podStartE2EDuration="1m0.313906427s" podCreationTimestamp="2025-12-10 11:03:34 +0000 UTC" firstStartedPulling="2025-12-10 11:03:37.369852731 +0000 UTC m=+1097.690063481" lastFinishedPulling="2025-12-10 11:03:54.40443594 +0000 UTC m=+1114.724646690" observedRunningTime="2025-12-10 11:04:34.310626293 +0000 UTC m=+1154.630837063" watchObservedRunningTime="2025-12-10 11:04:34.313906427 +0000 UTC m=+1154.634117187" Dec 10 11:04:35 crc kubenswrapper[4682]: I1210 11:04:35.049776 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-967d97867-cxbm8" Dec 10 11:04:35 crc kubenswrapper[4682]: I1210 11:04:35.096059 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-mkfhq" Dec 10 11:04:35 crc kubenswrapper[4682]: I1210 11:04:35.374803 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-5k89m" Dec 10 11:04:35 crc kubenswrapper[4682]: I1210 11:04:35.734101 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w" Dec 10 11:04:35 crc kubenswrapper[4682]: I1210 11:04:35.819363 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-5x7z6" Dec 10 11:04:36 crc kubenswrapper[4682]: I1210 11:04:36.478407 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:04:36 crc kubenswrapper[4682]: I1210 11:04:36.478484 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:04:56 crc kubenswrapper[4682]: I1210 11:04:56.811660 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w" podUID="c2238ca5-7b77-471b-a743-75e076a61ce1" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.97:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:04:56 crc kubenswrapper[4682]: I1210 11:04:56.811680 4682 
prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-54d54d59bc-cjf8w" podUID="c2238ca5-7b77-471b-a743-75e076a61ce1" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.97:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.174850 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-9jwk9"] Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.176194 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-9jwk9" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.183724 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.183914 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.184076 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-22dhl" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.184455 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.231415 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-9jwk9"] Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.319357 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-lns5b"] Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.320765 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.326785 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-lns5b"] Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.329326 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.331512 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d54a509-3920-43fa-b916-76ca2c0366ff-config\") pod \"dnsmasq-dns-675f4bcbfc-9jwk9\" (UID: \"4d54a509-3920-43fa-b916-76ca2c0366ff\") " pod="openstack/dnsmasq-dns-675f4bcbfc-9jwk9" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.331620 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsvhq\" (UniqueName: \"kubernetes.io/projected/4d54a509-3920-43fa-b916-76ca2c0366ff-kube-api-access-vsvhq\") pod \"dnsmasq-dns-675f4bcbfc-9jwk9\" (UID: \"4d54a509-3920-43fa-b916-76ca2c0366ff\") " pod="openstack/dnsmasq-dns-675f4bcbfc-9jwk9" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.433296 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b4961c9-bbce-41a1-815e-9e953082a574-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-lns5b\" (UID: \"5b4961c9-bbce-41a1-815e-9e953082a574\") " pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.433353 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d54a509-3920-43fa-b916-76ca2c0366ff-config\") pod \"dnsmasq-dns-675f4bcbfc-9jwk9\" (UID: \"4d54a509-3920-43fa-b916-76ca2c0366ff\") " pod="openstack/dnsmasq-dns-675f4bcbfc-9jwk9" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.433414 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdzg4\" (UniqueName: \"kubernetes.io/projected/5b4961c9-bbce-41a1-815e-9e953082a574-kube-api-access-bdzg4\") pod \"dnsmasq-dns-78dd6ddcc-lns5b\" (UID: \"5b4961c9-bbce-41a1-815e-9e953082a574\") " pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.433497 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsvhq\" (UniqueName: \"kubernetes.io/projected/4d54a509-3920-43fa-b916-76ca2c0366ff-kube-api-access-vsvhq\") pod \"dnsmasq-dns-675f4bcbfc-9jwk9\" (UID: \"4d54a509-3920-43fa-b916-76ca2c0366ff\") " pod="openstack/dnsmasq-dns-675f4bcbfc-9jwk9" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.433522 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b4961c9-bbce-41a1-815e-9e953082a574-config\") pod \"dnsmasq-dns-78dd6ddcc-lns5b\" (UID: \"5b4961c9-bbce-41a1-815e-9e953082a574\") " pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.434984 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d54a509-3920-43fa-b916-76ca2c0366ff-config\") pod \"dnsmasq-dns-675f4bcbfc-9jwk9\" (UID: \"4d54a509-3920-43fa-b916-76ca2c0366ff\") " pod="openstack/dnsmasq-dns-675f4bcbfc-9jwk9" Dec 10 
11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.458233 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsvhq\" (UniqueName: \"kubernetes.io/projected/4d54a509-3920-43fa-b916-76ca2c0366ff-kube-api-access-vsvhq\") pod \"dnsmasq-dns-675f4bcbfc-9jwk9\" (UID: \"4d54a509-3920-43fa-b916-76ca2c0366ff\") " pod="openstack/dnsmasq-dns-675f4bcbfc-9jwk9" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.502712 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-9jwk9" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.536166 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b4961c9-bbce-41a1-815e-9e953082a574-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-lns5b\" (UID: \"5b4961c9-bbce-41a1-815e-9e953082a574\") " pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.536380 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdzg4\" (UniqueName: \"kubernetes.io/projected/5b4961c9-bbce-41a1-815e-9e953082a574-kube-api-access-bdzg4\") pod \"dnsmasq-dns-78dd6ddcc-lns5b\" (UID: \"5b4961c9-bbce-41a1-815e-9e953082a574\") " pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.536492 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b4961c9-bbce-41a1-815e-9e953082a574-config\") pod \"dnsmasq-dns-78dd6ddcc-lns5b\" (UID: \"5b4961c9-bbce-41a1-815e-9e953082a574\") " pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.537442 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b4961c9-bbce-41a1-815e-9e953082a574-config\") pod \"dnsmasq-dns-78dd6ddcc-lns5b\" (UID: \"5b4961c9-bbce-41a1-815e-9e953082a574\") " pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.538042 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b4961c9-bbce-41a1-815e-9e953082a574-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-lns5b\" (UID: \"5b4961c9-bbce-41a1-815e-9e953082a574\") " pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.575541 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdzg4\" (UniqueName: \"kubernetes.io/projected/5b4961c9-bbce-41a1-815e-9e953082a574-kube-api-access-bdzg4\") pod \"dnsmasq-dns-78dd6ddcc-lns5b\" (UID: \"5b4961c9-bbce-41a1-815e-9e953082a574\") " pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.645852 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.785705 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-9jwk9"] Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.884291 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-9jwk9"] Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.908487 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-x9mcf"] Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.909766 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" Dec 10 11:04:58 crc kubenswrapper[4682]: I1210 11:04:58.934590 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-x9mcf"] Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.044073 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0dd38c46-9cd1-4ee9-921a-956470e90539-dns-svc\") pod \"dnsmasq-dns-666b6646f7-x9mcf\" (UID: \"0dd38c46-9cd1-4ee9-921a-956470e90539\") " pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.044132 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvbkj\" (UniqueName: \"kubernetes.io/projected/0dd38c46-9cd1-4ee9-921a-956470e90539-kube-api-access-wvbkj\") pod \"dnsmasq-dns-666b6646f7-x9mcf\" (UID: \"0dd38c46-9cd1-4ee9-921a-956470e90539\") " pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.044159 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dd38c46-9cd1-4ee9-921a-956470e90539-config\") pod \"dnsmasq-dns-666b6646f7-x9mcf\" (UID: \"0dd38c46-9cd1-4ee9-921a-956470e90539\") " pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.145573 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dd38c46-9cd1-4ee9-921a-956470e90539-config\") pod \"dnsmasq-dns-666b6646f7-x9mcf\" (UID: \"0dd38c46-9cd1-4ee9-921a-956470e90539\") " pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.145736 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0dd38c46-9cd1-4ee9-921a-956470e90539-dns-svc\") pod \"dnsmasq-dns-666b6646f7-x9mcf\" (UID: \"0dd38c46-9cd1-4ee9-921a-956470e90539\") " pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.145771 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvbkj\" (UniqueName: \"kubernetes.io/projected/0dd38c46-9cd1-4ee9-921a-956470e90539-kube-api-access-wvbkj\") pod \"dnsmasq-dns-666b6646f7-x9mcf\" (UID: \"0dd38c46-9cd1-4ee9-921a-956470e90539\") " pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.147176 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0dd38c46-9cd1-4ee9-921a-956470e90539-dns-svc\") pod \"dnsmasq-dns-666b6646f7-x9mcf\" (UID: 
\"0dd38c46-9cd1-4ee9-921a-956470e90539\") " pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.147183 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dd38c46-9cd1-4ee9-921a-956470e90539-config\") pod \"dnsmasq-dns-666b6646f7-x9mcf\" (UID: \"0dd38c46-9cd1-4ee9-921a-956470e90539\") " pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.174656 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvbkj\" (UniqueName: \"kubernetes.io/projected/0dd38c46-9cd1-4ee9-921a-956470e90539-kube-api-access-wvbkj\") pod \"dnsmasq-dns-666b6646f7-x9mcf\" (UID: \"0dd38c46-9cd1-4ee9-921a-956470e90539\") " pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.208942 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-x9mcf"] Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.209444 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" Dec 10 11:04:59 crc kubenswrapper[4682]: W1210 11:04:59.236627 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b4961c9_bbce_41a1_815e_9e953082a574.slice/crio-bc2e34fdb268718c57a5e4f1a6b03a2d2bc2c307e0c2719845c142a9d484bd1e WatchSource:0}: Error finding container bc2e34fdb268718c57a5e4f1a6b03a2d2bc2c307e0c2719845c142a9d484bd1e: Status 404 returned error can't find the container with id bc2e34fdb268718c57a5e4f1a6b03a2d2bc2c307e0c2719845c142a9d484bd1e Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.242702 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-lns5b"] Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.269731 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-j6d45"] Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.271407 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.279791 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-j6d45"] Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.453724 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-j6d45\" (UID: \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\") " pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.454085 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkrgx\" (UniqueName: \"kubernetes.io/projected/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-kube-api-access-qkrgx\") pod \"dnsmasq-dns-57d769cc4f-j6d45\" (UID: \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\") " pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.454117 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-config\") pod \"dnsmasq-dns-57d769cc4f-j6d45\" (UID: \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\") " pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.525772 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-x9mcf"] Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.555636 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-j6d45\" (UID: \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\") " pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.555712 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkrgx\" (UniqueName: \"kubernetes.io/projected/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-kube-api-access-qkrgx\") pod \"dnsmasq-dns-57d769cc4f-j6d45\" (UID: \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\") " pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.555743 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-config\") pod \"dnsmasq-dns-57d769cc4f-j6d45\" (UID: \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\") " pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.556727 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-j6d45\" (UID: \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\") " pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.556888 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-config\") pod \"dnsmasq-dns-57d769cc4f-j6d45\" (UID: \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\") " pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.574351 
4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkrgx\" (UniqueName: \"kubernetes.io/projected/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-kube-api-access-qkrgx\") pod \"dnsmasq-dns-57d769cc4f-j6d45\" (UID: \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\") " pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.606115 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.616871 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" event={"ID":"0dd38c46-9cd1-4ee9-921a-956470e90539","Type":"ContainerStarted","Data":"7774409bed762879a024a4638b5e7018584d622d9f5fe92ac487a01b0b8f9061"} Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.621463 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-9jwk9" event={"ID":"4d54a509-3920-43fa-b916-76ca2c0366ff","Type":"ContainerStarted","Data":"47f32cc3dfc36254a443eff838df12653d0f64de6566ad5c26f697aad8117728"} Dec 10 11:04:59 crc kubenswrapper[4682]: I1210 11:04:59.626403 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" event={"ID":"5b4961c9-bbce-41a1-815e-9e953082a574","Type":"ContainerStarted","Data":"bc2e34fdb268718c57a5e4f1a6b03a2d2bc2c307e0c2719845c142a9d484bd1e"} Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.070573 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.076679 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.084987 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.085099 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.085250 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.085351 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.085393 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.085443 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-zrlpn" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.085859 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.093035 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.168362 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 
11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.168449 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.168494 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7362d622-686c-48e5-b0de-562fae10bc35-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.168519 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.168550 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2272\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-kube-api-access-l2272\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.168590 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.168623 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7362d622-686c-48e5-b0de-562fae10bc35-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.168683 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-config-data\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.168715 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.168758 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.168786 4682 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.187419 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-j6d45"] Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.270311 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.270360 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7362d622-686c-48e5-b0de-562fae10bc35-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.270387 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.270421 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2272\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-kube-api-access-l2272\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.270460 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.270510 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7362d622-686c-48e5-b0de-562fae10bc35-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.270537 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-config-data\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.270559 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.270594 4682 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.270618 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.270654 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.271101 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.271772 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.273433 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.274364 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-config-data\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.277978 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7362d622-686c-48e5-b0de-562fae10bc35-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.279168 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.279358 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.279432 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ae9f9a2cec2d07eb6ee17a3db3aa6e274ca33ba3628d7fecb8aaa98a76caa599/globalmount\"" pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.283150 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.284410 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.293501 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7362d622-686c-48e5-b0de-562fae10bc35-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.304412 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2272\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-kube-api-access-l2272\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.332444 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\") pod \"rabbitmq-server-0\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.364909 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.369945 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.372842 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-87b4g" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.373036 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.373212 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.373262 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.373758 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.373814 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.375967 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.429719 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.473922 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.486419 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.486510 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c211ac37-0b53-466f-ad83-7062f681c32b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.486555 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c211ac37-0b53-466f-ad83-7062f681c32b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.486589 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.486622 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " 
pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.486652 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.486677 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.486729 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.486829 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.486887 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-765cp\" (UniqueName: \"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-kube-api-access-765cp\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.486927 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.587926 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.588004 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c211ac37-0b53-466f-ad83-7062f681c32b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.588072 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c211ac37-0b53-466f-ad83-7062f681c32b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.588104 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.588131 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.588165 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.588219 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.588257 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.588325 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.588399 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-765cp\" (UniqueName: \"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-kube-api-access-765cp\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.588430 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.593107 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 
11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.593693 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.593918 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.594878 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.597815 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.615785 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9175dee665a5343fd54b62099e2589ce8aeecb32571dc715a22448f6bd4b0462/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.598625 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.615697 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c211ac37-0b53-466f-ad83-7062f681c32b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.616115 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.598241 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c211ac37-0b53-466f-ad83-7062f681c32b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.616969 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.620930 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-765cp\" (UniqueName: \"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-kube-api-access-765cp\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.669214 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" event={"ID":"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8","Type":"ContainerStarted","Data":"b2edc23f4829464b3ca5aed596b32fc7293cdde5135ee9c40cf674d54eaa2319"} Dec 10 11:05:00 crc kubenswrapper[4682]: I1210 11:05:00.727256 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\") pod \"rabbitmq-cell1-server-0\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.213166 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.550656 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.557394 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.566771 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.569120 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-x6b79" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.569192 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.569137 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.576134 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.580171 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.727397 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.743206 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.743255 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.743280 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhh5c\" (UniqueName: \"kubernetes.io/projected/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-kube-api-access-qhh5c\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.743315 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.743333 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-config-data-default\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.743363 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.743386 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-ddd433ab-a938-4388-9bc2-a177d2475646\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ddd433ab-a938-4388-9bc2-a177d2475646\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.743412 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-kolla-config\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.847382 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.847438 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.847528 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhh5c\" (UniqueName: 
\"kubernetes.io/projected/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-kube-api-access-qhh5c\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.847601 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.847630 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-config-data-default\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.847673 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.847705 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-ddd433ab-a938-4388-9bc2-a177d2475646\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ddd433ab-a938-4388-9bc2-a177d2475646\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.847738 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-kolla-config\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.848787 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-kolla-config\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.848845 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-config-data-default\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.848841 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.850703 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " 
pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.855721 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.856572 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.896516 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhh5c\" (UniqueName: \"kubernetes.io/projected/1b6b3db6-e7bd-4c87-a35a-1f398c40436e-kube-api-access-qhh5c\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.949339 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.949396 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-ddd433ab-a938-4388-9bc2-a177d2475646\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ddd433ab-a938-4388-9bc2-a177d2475646\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/41780b9dc4d6a874ecb0fe24c72f09945e9bcc92698736e6976672971851ab36/globalmount\"" pod="openstack/openstack-galera-0" Dec 10 11:05:01 crc kubenswrapper[4682]: I1210 11:05:01.990708 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-ddd433ab-a938-4388-9bc2-a177d2475646\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ddd433ab-a938-4388-9bc2-a177d2475646\") pod \"openstack-galera-0\" (UID: \"1b6b3db6-e7bd-4c87-a35a-1f398c40436e\") " pod="openstack/openstack-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.061445 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:05:02 crc kubenswrapper[4682]: W1210 11:05:02.068497 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc211ac37_0b53_466f_ad83_7062f681c32b.slice/crio-39eaab8e7c09a0655115f029b9346c4ecdec0aefbf80f7b00f0fce3e382dac6b WatchSource:0}: Error finding container 39eaab8e7c09a0655115f029b9346c4ecdec0aefbf80f7b00f0fce3e382dac6b: Status 404 returned error can't find the container with id 39eaab8e7c09a0655115f029b9346c4ecdec0aefbf80f7b00f0fce3e382dac6b Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.182645 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.652277 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.654168 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.658767 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.661949 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.662348 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-km6jh" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.665275 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.675485 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.715304 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c211ac37-0b53-466f-ad83-7062f681c32b","Type":"ContainerStarted","Data":"39eaab8e7c09a0655115f029b9346c4ecdec0aefbf80f7b00f0fce3e382dac6b"} Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.718166 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7362d622-686c-48e5-b0de-562fae10bc35","Type":"ContainerStarted","Data":"e1e5b2899b676f626bc1c8e9b9535c1807f07453d8f6cebfde6ef90716c4ac13"} Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.763654 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9f85710-54c3-4f30-88f6-bb97f9a200e8-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.763720 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d9f85710-54c3-4f30-88f6-bb97f9a200e8-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.763770 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-dc165b38-ef46-4605-9c50-a4994c49c36c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc165b38-ef46-4605-9c50-a4994c49c36c\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.763795 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d9f85710-54c3-4f30-88f6-bb97f9a200e8-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.763820 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9f85710-54c3-4f30-88f6-bb97f9a200e8-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: 
\"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.763862 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ld6l6\" (UniqueName: \"kubernetes.io/projected/d9f85710-54c3-4f30-88f6-bb97f9a200e8-kube-api-access-ld6l6\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.763910 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d9f85710-54c3-4f30-88f6-bb97f9a200e8-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.763951 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9f85710-54c3-4f30-88f6-bb97f9a200e8-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.865495 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9f85710-54c3-4f30-88f6-bb97f9a200e8-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.865574 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d9f85710-54c3-4f30-88f6-bb97f9a200e8-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.865603 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-dc165b38-ef46-4605-9c50-a4994c49c36c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc165b38-ef46-4605-9c50-a4994c49c36c\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.865634 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d9f85710-54c3-4f30-88f6-bb97f9a200e8-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.865666 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9f85710-54c3-4f30-88f6-bb97f9a200e8-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.865704 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ld6l6\" (UniqueName: \"kubernetes.io/projected/d9f85710-54c3-4f30-88f6-bb97f9a200e8-kube-api-access-ld6l6\") pod 
\"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.865752 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d9f85710-54c3-4f30-88f6-bb97f9a200e8-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.865808 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9f85710-54c3-4f30-88f6-bb97f9a200e8-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.866355 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d9f85710-54c3-4f30-88f6-bb97f9a200e8-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.866668 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d9f85710-54c3-4f30-88f6-bb97f9a200e8-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.866675 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d9f85710-54c3-4f30-88f6-bb97f9a200e8-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.867871 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.867910 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-dc165b38-ef46-4605-9c50-a4994c49c36c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc165b38-ef46-4605-9c50-a4994c49c36c\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0d43e975110c8815506e6c34860d8c2f0cab9ffb607c3faea0839b548bf62642/globalmount\"" pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.868388 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d9f85710-54c3-4f30-88f6-bb97f9a200e8-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.875500 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9f85710-54c3-4f30-88f6-bb97f9a200e8-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.880297 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9f85710-54c3-4f30-88f6-bb97f9a200e8-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.885136 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ld6l6\" (UniqueName: \"kubernetes.io/projected/d9f85710-54c3-4f30-88f6-bb97f9a200e8-kube-api-access-ld6l6\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.927977 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-dc165b38-ef46-4605-9c50-a4994c49c36c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc165b38-ef46-4605-9c50-a4994c49c36c\") pod \"openstack-cell1-galera-0\" (UID: \"d9f85710-54c3-4f30-88f6-bb97f9a200e8\") " pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.946593 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 10 11:05:02 crc kubenswrapper[4682]: I1210 11:05:02.984819 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.203974 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.205198 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.209270 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.210847 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-qs5zv" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.211095 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.240570 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.300727 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/63a61a31-230d-455f-b27a-87760ae46c25-memcached-tls-certs\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.300832 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a61a31-230d-455f-b27a-87760ae46c25-combined-ca-bundle\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.300932 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/63a61a31-230d-455f-b27a-87760ae46c25-kolla-config\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.301144 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/63a61a31-230d-455f-b27a-87760ae46c25-config-data\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.301179 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-798xq\" (UniqueName: \"kubernetes.io/projected/63a61a31-230d-455f-b27a-87760ae46c25-kube-api-access-798xq\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.402201 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a61a31-230d-455f-b27a-87760ae46c25-combined-ca-bundle\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.402262 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/63a61a31-230d-455f-b27a-87760ae46c25-kolla-config\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.402317 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/63a61a31-230d-455f-b27a-87760ae46c25-config-data\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.402340 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-798xq\" (UniqueName: \"kubernetes.io/projected/63a61a31-230d-455f-b27a-87760ae46c25-kube-api-access-798xq\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.402389 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/63a61a31-230d-455f-b27a-87760ae46c25-memcached-tls-certs\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.403970 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/63a61a31-230d-455f-b27a-87760ae46c25-config-data\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.407750 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/63a61a31-230d-455f-b27a-87760ae46c25-kolla-config\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.411434 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a61a31-230d-455f-b27a-87760ae46c25-combined-ca-bundle\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.412899 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/63a61a31-230d-455f-b27a-87760ae46c25-memcached-tls-certs\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.420464 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-798xq\" (UniqueName: \"kubernetes.io/projected/63a61a31-230d-455f-b27a-87760ae46c25-kube-api-access-798xq\") pod \"memcached-0\" (UID: \"63a61a31-230d-455f-b27a-87760ae46c25\") " pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.558835 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.606958 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 10 11:05:03 crc kubenswrapper[4682]: I1210 11:05:03.736352 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1b6b3db6-e7bd-4c87-a35a-1f398c40436e","Type":"ContainerStarted","Data":"0afa169d446dd19a10af616357cc1fd1e76909dfad5b5403639b3a6022c7d739"} Dec 10 11:05:04 crc kubenswrapper[4682]: W1210 11:05:04.128621 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9f85710_54c3_4f30_88f6_bb97f9a200e8.slice/crio-7a7eadf38a5e13f664a93fa23a68d572b7e71e0d36f2abb0808b55bed0ccaf18 WatchSource:0}: Error finding container 7a7eadf38a5e13f664a93fa23a68d572b7e71e0d36f2abb0808b55bed0ccaf18: Status 404 returned error can't find the container with id 7a7eadf38a5e13f664a93fa23a68d572b7e71e0d36f2abb0808b55bed0ccaf18 Dec 10 11:05:04 crc kubenswrapper[4682]: I1210 11:05:04.708522 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 10 11:05:04 crc kubenswrapper[4682]: I1210 11:05:04.769403 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"63a61a31-230d-455f-b27a-87760ae46c25","Type":"ContainerStarted","Data":"5fed0df673a3fd2fb36a7cb8fc67905610c325fa924c61e59deb63c7f73e1b66"} Dec 10 11:05:04 crc kubenswrapper[4682]: I1210 11:05:04.775739 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d9f85710-54c3-4f30-88f6-bb97f9a200e8","Type":"ContainerStarted","Data":"7a7eadf38a5e13f664a93fa23a68d572b7e71e0d36f2abb0808b55bed0ccaf18"} Dec 10 11:05:04 crc kubenswrapper[4682]: I1210 11:05:04.935453 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:05:04 crc kubenswrapper[4682]: I1210 11:05:04.936650 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 11:05:04 crc kubenswrapper[4682]: I1210 11:05:04.944986 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-q9qnv" Dec 10 11:05:04 crc kubenswrapper[4682]: I1210 11:05:04.951399 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.038436 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsnpq\" (UniqueName: \"kubernetes.io/projected/af0ab072-8822-403b-ac67-8689937752bd-kube-api-access-vsnpq\") pod \"kube-state-metrics-0\" (UID: \"af0ab072-8822-403b-ac67-8689937752bd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.141263 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsnpq\" (UniqueName: \"kubernetes.io/projected/af0ab072-8822-403b-ac67-8689937752bd-kube-api-access-vsnpq\") pod \"kube-state-metrics-0\" (UID: \"af0ab072-8822-403b-ac67-8689937752bd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.192916 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsnpq\" (UniqueName: \"kubernetes.io/projected/af0ab072-8822-403b-ac67-8689937752bd-kube-api-access-vsnpq\") pod \"kube-state-metrics-0\" (UID: \"af0ab072-8822-403b-ac67-8689937752bd\") " pod="openstack/kube-state-metrics-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.271378 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.651952 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/alertmanager-metric-storage-0"] Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.653918 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.657603 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.657885 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-24l75" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.658009 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.658664 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.658758 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.668407 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.753557 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/55723944-339e-4ed6-9159-9696ca1debeb-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.753616 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/55723944-339e-4ed6-9159-9696ca1debeb-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.753642 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/55723944-339e-4ed6-9159-9696ca1debeb-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.753679 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/55723944-339e-4ed6-9159-9696ca1debeb-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.753709 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nf2lq\" (UniqueName: \"kubernetes.io/projected/55723944-339e-4ed6-9159-9696ca1debeb-kube-api-access-nf2lq\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.753732 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/55723944-339e-4ed6-9159-9696ca1debeb-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: 
\"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.753752 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/55723944-339e-4ed6-9159-9696ca1debeb-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.855554 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/55723944-339e-4ed6-9159-9696ca1debeb-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.855615 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nf2lq\" (UniqueName: \"kubernetes.io/projected/55723944-339e-4ed6-9159-9696ca1debeb-kube-api-access-nf2lq\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.855644 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/55723944-339e-4ed6-9159-9696ca1debeb-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.855667 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/55723944-339e-4ed6-9159-9696ca1debeb-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.855719 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/55723944-339e-4ed6-9159-9696ca1debeb-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.855751 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/55723944-339e-4ed6-9159-9696ca1debeb-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.855773 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/55723944-339e-4ed6-9159-9696ca1debeb-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.866895 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/55723944-339e-4ed6-9159-9696ca1debeb-web-config\") pod 
\"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.870557 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/55723944-339e-4ed6-9159-9696ca1debeb-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.870941 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/55723944-339e-4ed6-9159-9696ca1debeb-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.877820 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/55723944-339e-4ed6-9159-9696ca1debeb-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.866943 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/55723944-339e-4ed6-9159-9696ca1debeb-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.892598 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf2lq\" (UniqueName: \"kubernetes.io/projected/55723944-339e-4ed6-9159-9696ca1debeb-kube-api-access-nf2lq\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.901425 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/55723944-339e-4ed6-9159-9696ca1debeb-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"55723944-339e-4ed6-9159-9696ca1debeb\") " pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.940028 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:05:05 crc kubenswrapper[4682]: I1210 11:05:05.990105 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.265537 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.278616 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.283462 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.284157 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-hfkrt" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.290157 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.290402 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.291742 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.308059 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.308913 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.375317 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.375366 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.375401 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.375441 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tswqz\" (UniqueName: \"kubernetes.io/projected/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-kube-api-access-tswqz\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.375532 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.375700 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.375759 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-config\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.375810 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.477117 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.477189 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.477236 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.477302 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tswqz\" (UniqueName: \"kubernetes.io/projected/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-kube-api-access-tswqz\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.477392 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.477418 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc 
kubenswrapper[4682]: I1210 11:05:06.477444 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-config\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.477501 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.479162 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.486116 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.486166 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.486212 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.487070 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a87379aa7407b916521958c3640f1cf7fec14e9fe313d9dbea26901e472ba31c"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.487129 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://a87379aa7407b916521958c3640f1cf7fec14e9fe313d9dbea26901e472ba31c" gracePeriod=600 Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.495851 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.495972 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: 
\"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.496550 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.497681 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.497728 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8e1318f74973771f6a5154c276126f7c0f974665b019a5b52a8da0720271e292/globalmount\"" pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.498711 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-config\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.500748 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.513311 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tswqz\" (UniqueName: \"kubernetes.io/projected/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-kube-api-access-tswqz\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.537901 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\") pod \"prometheus-metric-storage-0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:06 crc kubenswrapper[4682]: I1210 11:05:06.630511 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 11:05:07 crc kubenswrapper[4682]: I1210 11:05:07.807377 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="a87379aa7407b916521958c3640f1cf7fec14e9fe313d9dbea26901e472ba31c" exitCode=0 Dec 10 11:05:07 crc kubenswrapper[4682]: I1210 11:05:07.807420 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"a87379aa7407b916521958c3640f1cf7fec14e9fe313d9dbea26901e472ba31c"} Dec 10 11:05:07 crc kubenswrapper[4682]: I1210 11:05:07.807453 4682 scope.go:117] "RemoveContainer" containerID="cb1f236ceb4d4541ff9535181be092107ce5f587a0c363e01762746593060db5" Dec 10 11:05:09 crc kubenswrapper[4682]: I1210 11:05:09.927704 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-w7jxw"] Dec 10 11:05:09 crc kubenswrapper[4682]: I1210 11:05:09.934771 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:09 crc kubenswrapper[4682]: I1210 11:05:09.936792 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-4b2ch"] Dec 10 11:05:09 crc kubenswrapper[4682]: I1210 11:05:09.937373 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Dec 10 11:05:09 crc kubenswrapper[4682]: I1210 11:05:09.937423 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Dec 10 11:05:09 crc kubenswrapper[4682]: I1210 11:05:09.938458 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:09 crc kubenswrapper[4682]: I1210 11:05:09.939113 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-n4brp" Dec 10 11:05:09 crc kubenswrapper[4682]: I1210 11:05:09.954866 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-w7jxw"] Dec 10 11:05:09 crc kubenswrapper[4682]: I1210 11:05:09.961398 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-4b2ch"] Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.036093 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cw2ll\" (UniqueName: \"kubernetes.io/projected/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-kube-api-access-cw2ll\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.036135 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-ovn-controller-tls-certs\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.036155 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-var-lib\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " 
pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.036170 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-var-run-ovn\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.036189 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-var-run\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.036220 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbqd6\" (UniqueName: \"kubernetes.io/projected/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-kube-api-access-fbqd6\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.036254 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-var-log-ovn\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.036274 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-var-log\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.036300 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-combined-ca-bundle\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.036324 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-etc-ovs\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.036357 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-scripts\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.036374 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-scripts\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 
crc kubenswrapper[4682]: I1210 11:05:10.036395 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-var-run\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.137443 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cw2ll\" (UniqueName: \"kubernetes.io/projected/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-kube-api-access-cw2ll\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.137503 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-ovn-controller-tls-certs\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.137524 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-var-lib\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.137543 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-var-run-ovn\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.137564 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-var-run\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.137597 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbqd6\" (UniqueName: \"kubernetes.io/projected/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-kube-api-access-fbqd6\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.137635 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-var-log\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.137652 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-var-log-ovn\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.137677 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-combined-ca-bundle\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.137693 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-etc-ovs\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.137725 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-scripts\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.137742 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-scripts\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.137765 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-var-run\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.138141 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-var-run-ovn\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.138278 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-var-run\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.138338 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-var-lib\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.138527 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-var-log-ovn\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.138862 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-var-log\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.139089 4682 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-etc-ovs\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.139366 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-var-run\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.140615 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-scripts\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.141007 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-scripts\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.148172 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-combined-ca-bundle\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.154825 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cw2ll\" (UniqueName: \"kubernetes.io/projected/8ee7ede4-07ea-4b15-88e7-15477c99d5ab-kube-api-access-cw2ll\") pod \"ovn-controller-ovs-4b2ch\" (UID: \"8ee7ede4-07ea-4b15-88e7-15477c99d5ab\") " pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.155012 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-ovn-controller-tls-certs\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.158179 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbqd6\" (UniqueName: \"kubernetes.io/projected/df9d7d76-fa02-41c5-b652-ea9b7b00bd00-kube-api-access-fbqd6\") pod \"ovn-controller-w7jxw\" (UID: \"df9d7d76-fa02-41c5-b652-ea9b7b00bd00\") " pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.269372 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.288760 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.828824 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.832236 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.832495 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.995018 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.995182 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.995225 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.995322 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Dec 10 11:05:10 crc kubenswrapper[4682]: I1210 11:05:10.995401 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-6gcz5" Dec 10 11:05:11 crc kubenswrapper[4682]: W1210 11:05:11.069056 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf0ab072_8822_403b_ac67_8689937752bd.slice/crio-4f1c2d2fde62292e929e9364e36b6a02e75e86ecd644fa71418806e410a6fbb4 WatchSource:0}: Error finding container 4f1c2d2fde62292e929e9364e36b6a02e75e86ecd644fa71418806e410a6fbb4: Status 404 returned error can't find the container with id 4f1c2d2fde62292e929e9364e36b6a02e75e86ecd644fa71418806e410a6fbb4 Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.096269 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.096331 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.096386 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.097106 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: 
I1210 11:05:11.097184 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2g4nk\" (UniqueName: \"kubernetes.io/projected/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-kube-api-access-2g4nk\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.097262 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.097288 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-config\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.097313 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-3b43d775-2d05-4bcd-af87-e6030e4d010c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3b43d775-2d05-4bcd-af87-e6030e4d010c\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.199527 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.199611 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2g4nk\" (UniqueName: \"kubernetes.io/projected/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-kube-api-access-2g4nk\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.199649 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.199679 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-config\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.199707 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-3b43d775-2d05-4bcd-af87-e6030e4d010c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3b43d775-2d05-4bcd-af87-e6030e4d010c\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.199787 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.199846 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.199879 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.201952 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-config\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.202084 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.202376 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.204116 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.206599 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.206639 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-3b43d775-2d05-4bcd-af87-e6030e4d010c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3b43d775-2d05-4bcd-af87-e6030e4d010c\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ec528eea363d34d6424bc78d5c67aa8875820bd805a4b874b8ed1fa9a38bb679/globalmount\"" pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.221672 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.237458 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.242971 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2g4nk\" (UniqueName: \"kubernetes.io/projected/97a3c791-f1b7-4665-ae8b-fa87d1ee73e1-kube-api-access-2g4nk\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.304431 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-3b43d775-2d05-4bcd-af87-e6030e4d010c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3b43d775-2d05-4bcd-af87-e6030e4d010c\") pod \"ovsdbserver-nb-0\" (UID: \"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1\") " pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:11 crc kubenswrapper[4682]: I1210 11:05:11.323701 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:12 crc kubenswrapper[4682]: I1210 11:05:12.021247 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"af0ab072-8822-403b-ac67-8689937752bd","Type":"ContainerStarted","Data":"4f1c2d2fde62292e929e9364e36b6a02e75e86ecd644fa71418806e410a6fbb4"} Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.267938 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.269510 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.275722 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.275947 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-z6x7q" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.276116 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.276369 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.282405 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.372656 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13b6a06f-420a-420d-8a7c-5a80d312ec79-config\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.372711 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/13b6a06f-420a-420d-8a7c-5a80d312ec79-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.372741 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/13b6a06f-420a-420d-8a7c-5a80d312ec79-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.372769 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13b6a06f-420a-420d-8a7c-5a80d312ec79-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.372797 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-90d5cffd-85e6-4d5f-99db-528f336b6404\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90d5cffd-85e6-4d5f-99db-528f336b6404\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.372835 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrhpc\" (UniqueName: \"kubernetes.io/projected/13b6a06f-420a-420d-8a7c-5a80d312ec79-kube-api-access-mrhpc\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.372865 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/13b6a06f-420a-420d-8a7c-5a80d312ec79-ovsdbserver-sb-tls-certs\") 
pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.372883 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/13b6a06f-420a-420d-8a7c-5a80d312ec79-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.473855 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrhpc\" (UniqueName: \"kubernetes.io/projected/13b6a06f-420a-420d-8a7c-5a80d312ec79-kube-api-access-mrhpc\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.473916 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/13b6a06f-420a-420d-8a7c-5a80d312ec79-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.473939 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/13b6a06f-420a-420d-8a7c-5a80d312ec79-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.473983 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13b6a06f-420a-420d-8a7c-5a80d312ec79-config\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.474008 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/13b6a06f-420a-420d-8a7c-5a80d312ec79-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.474032 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/13b6a06f-420a-420d-8a7c-5a80d312ec79-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.474057 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13b6a06f-420a-420d-8a7c-5a80d312ec79-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.474088 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-90d5cffd-85e6-4d5f-99db-528f336b6404\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90d5cffd-85e6-4d5f-99db-528f336b6404\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.475235 4682 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/13b6a06f-420a-420d-8a7c-5a80d312ec79-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.475805 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13b6a06f-420a-420d-8a7c-5a80d312ec79-config\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.478037 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.478063 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-90d5cffd-85e6-4d5f-99db-528f336b6404\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90d5cffd-85e6-4d5f-99db-528f336b6404\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/a6693d7df02dc11706c5966121d2fd4388360dee4232df45ce02b4702fe89726/globalmount\"" pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.482300 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13b6a06f-420a-420d-8a7c-5a80d312ec79-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.483209 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/13b6a06f-420a-420d-8a7c-5a80d312ec79-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.489897 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/13b6a06f-420a-420d-8a7c-5a80d312ec79-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.490836 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrhpc\" (UniqueName: \"kubernetes.io/projected/13b6a06f-420a-420d-8a7c-5a80d312ec79-kube-api-access-mrhpc\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.496969 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/13b6a06f-420a-420d-8a7c-5a80d312ec79-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.519715 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-90d5cffd-85e6-4d5f-99db-528f336b6404\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-90d5cffd-85e6-4d5f-99db-528f336b6404\") pod \"ovsdbserver-sb-0\" (UID: 
\"13b6a06f-420a-420d-8a7c-5a80d312ec79\") " pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:13 crc kubenswrapper[4682]: I1210 11:05:13.617256 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:16 crc kubenswrapper[4682]: I1210 11:05:16.960911 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz"] Dec 10 11:05:16 crc kubenswrapper[4682]: I1210 11:05:16.962323 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:16 crc kubenswrapper[4682]: I1210 11:05:16.964760 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-distributor-http" Dec 10 11:05:16 crc kubenswrapper[4682]: I1210 11:05:16.964932 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-distributor-grpc" Dec 10 11:05:16 crc kubenswrapper[4682]: I1210 11:05:16.965838 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-dockercfg-bv7n4" Dec 10 11:05:16 crc kubenswrapper[4682]: I1210 11:05:16.965936 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"cloudkitty-lokistack-config" Dec 10 11:05:16 crc kubenswrapper[4682]: I1210 11:05:16.969571 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"cloudkitty-lokistack-ca-bundle" Dec 10 11:05:16 crc kubenswrapper[4682]: I1210 11:05:16.972122 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz"] Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.149194 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c"] Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.150928 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.156504 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/74c8133d-aa41-4891-8a66-fafa28cfd141-cloudkitty-lokistack-distributor-grpc\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.156560 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74c8133d-aa41-4891-8a66-fafa28cfd141-config\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.156630 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-distributor-http\" (UniqueName: \"kubernetes.io/secret/74c8133d-aa41-4891-8a66-fafa28cfd141-cloudkitty-lokistack-distributor-http\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.156654 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74c8133d-aa41-4891-8a66-fafa28cfd141-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.156697 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8z6h7\" (UniqueName: \"kubernetes.io/projected/74c8133d-aa41-4891-8a66-fafa28cfd141-kube-api-access-8z6h7\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.157937 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-loki-s3" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.158067 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-querier-http" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.158202 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-querier-grpc" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.176607 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c"] Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.227841 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz"] Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.229769 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.233398 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-query-frontend-http" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.238911 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-query-frontend-grpc" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.254821 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz"] Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.257945 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/74c8133d-aa41-4891-8a66-fafa28cfd141-cloudkitty-lokistack-distributor-grpc\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.258184 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74c8133d-aa41-4891-8a66-fafa28cfd141-config\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.258212 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6g682\" (UniqueName: \"kubernetes.io/projected/6e286958-b529-4f19-b8e3-164e6fe16e70-kube-api-access-6g682\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.258258 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/6e286958-b529-4f19-b8e3-164e6fe16e70-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.258299 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-querier-http\" (UniqueName: \"kubernetes.io/secret/6e286958-b529-4f19-b8e3-164e6fe16e70-cloudkitty-lokistack-querier-http\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.258334 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-distributor-http\" (UniqueName: \"kubernetes.io/secret/74c8133d-aa41-4891-8a66-fafa28cfd141-cloudkitty-lokistack-distributor-http\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.258362 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74c8133d-aa41-4891-8a66-fafa28cfd141-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.258400 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8z6h7\" (UniqueName: \"kubernetes.io/projected/74c8133d-aa41-4891-8a66-fafa28cfd141-kube-api-access-8z6h7\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.258434 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-querier-grpc\" (UniqueName: \"kubernetes.io/secret/6e286958-b529-4f19-b8e3-164e6fe16e70-cloudkitty-lokistack-querier-grpc\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.258497 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e286958-b529-4f19-b8e3-164e6fe16e70-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.258520 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e286958-b529-4f19-b8e3-164e6fe16e70-config\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.259735 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74c8133d-aa41-4891-8a66-fafa28cfd141-config\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.260941 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/74c8133d-aa41-4891-8a66-fafa28cfd141-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.285159 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8z6h7\" (UniqueName: \"kubernetes.io/projected/74c8133d-aa41-4891-8a66-fafa28cfd141-kube-api-access-8z6h7\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.292239 4682 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"cloudkitty-lokistack-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/74c8133d-aa41-4891-8a66-fafa28cfd141-cloudkitty-lokistack-distributor-grpc\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.297522 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-distributor-http\" (UniqueName: \"kubernetes.io/secret/74c8133d-aa41-4891-8a66-fafa28cfd141-cloudkitty-lokistack-distributor-http\") pod \"cloudkitty-lokistack-distributor-664b687b54-w4wxz\" (UID: \"74c8133d-aa41-4891-8a66-fafa28cfd141\") " pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.329419 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq"] Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.330644 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.339104 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-gateway" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.339660 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-gateway-client-http" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.339362 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-gateway-http" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.339446 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"cloudkitty-lokistack-gateway-ca-bundle" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.339508 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"cloudkitty-lokistack-ca" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.339566 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"cloudkitty-lokistack-gateway" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.339636 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-gateway-dockercfg-vpcgt" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.358890 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq"] Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.359568 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/31331344-4f2b-497d-9683-ea3e235bf0df-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.359608 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/6e286958-b529-4f19-b8e3-164e6fe16e70-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 
11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.359632 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/31331344-4f2b-497d-9683-ea3e235bf0df-cloudkitty-lokistack-query-frontend-grpc\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.359661 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-querier-http\" (UniqueName: \"kubernetes.io/secret/6e286958-b529-4f19-b8e3-164e6fe16e70-cloudkitty-lokistack-querier-http\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.359694 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31331344-4f2b-497d-9683-ea3e235bf0df-config\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.359732 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp6h7\" (UniqueName: \"kubernetes.io/projected/31331344-4f2b-497d-9683-ea3e235bf0df-kube-api-access-kp6h7\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.359752 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-querier-grpc\" (UniqueName: \"kubernetes.io/secret/6e286958-b529-4f19-b8e3-164e6fe16e70-cloudkitty-lokistack-querier-grpc\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.359782 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e286958-b529-4f19-b8e3-164e6fe16e70-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.359807 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e286958-b529-4f19-b8e3-164e6fe16e70-config\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.359833 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/31331344-4f2b-497d-9683-ea3e235bf0df-cloudkitty-lokistack-query-frontend-http\") pod 
\"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.359872 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6g682\" (UniqueName: \"kubernetes.io/projected/6e286958-b529-4f19-b8e3-164e6fe16e70-kube-api-access-6g682\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.362989 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e286958-b529-4f19-b8e3-164e6fe16e70-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.363506 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e286958-b529-4f19-b8e3-164e6fe16e70-config\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.369167 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/6e286958-b529-4f19-b8e3-164e6fe16e70-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.381076 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz"] Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.385918 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-querier-http\" (UniqueName: \"kubernetes.io/secret/6e286958-b529-4f19-b8e3-164e6fe16e70-cloudkitty-lokistack-querier-http\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.398373 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.402323 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-querier-grpc\" (UniqueName: \"kubernetes.io/secret/6e286958-b529-4f19-b8e3-164e6fe16e70-cloudkitty-lokistack-querier-grpc\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.403091 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6g682\" (UniqueName: \"kubernetes.io/projected/6e286958-b529-4f19-b8e3-164e6fe16e70-kube-api-access-6g682\") pod \"cloudkitty-lokistack-querier-5467947bf7-wwt8c\" (UID: \"6e286958-b529-4f19-b8e3-164e6fe16e70\") " pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.436220 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz"] Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.466812 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31331344-4f2b-497d-9683-ea3e235bf0df-config\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.466901 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpfzc\" (UniqueName: \"kubernetes.io/projected/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-kube-api-access-vpfzc\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.466952 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.467024 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp6h7\" (UniqueName: \"kubernetes.io/projected/31331344-4f2b-497d-9683-ea3e235bf0df-kube-api-access-kp6h7\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.467075 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-cloudkitty-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.467141 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"tls-secret\" (UniqueName: \"kubernetes.io/secret/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.467232 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/31331344-4f2b-497d-9683-ea3e235bf0df-cloudkitty-lokistack-query-frontend-http\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.468408 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-cloudkitty-lokistack-gateway-client-http\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.469170 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-lokistack-gateway\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.469254 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/31331344-4f2b-497d-9683-ea3e235bf0df-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.470150 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/31331344-4f2b-497d-9683-ea3e235bf0df-cloudkitty-lokistack-query-frontend-grpc\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.470197 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-tenants\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.470306 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-rbac\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.470355 4682 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-cloudkitty-lokistack-gateway-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.476553 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/31331344-4f2b-497d-9683-ea3e235bf0df-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.477681 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31331344-4f2b-497d-9683-ea3e235bf0df-config\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.482019 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/31331344-4f2b-497d-9683-ea3e235bf0df-cloudkitty-lokistack-query-frontend-grpc\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.482173 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.501040 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/31331344-4f2b-497d-9683-ea3e235bf0df-cloudkitty-lokistack-query-frontend-http\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.502049 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp6h7\" (UniqueName: \"kubernetes.io/projected/31331344-4f2b-497d-9683-ea3e235bf0df-kube-api-access-kp6h7\") pod \"cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz\" (UID: \"31331344-4f2b-497d-9683-ea3e235bf0df\") " pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.559059 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.572522 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/613faa0b-14df-452d-820e-1d3e589b183c-tenants\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.572596 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-lokistack-gateway\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.572647 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-cloudkitty-lokistack-gateway-client-http\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.572714 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-cloudkitty-lokistack-gateway-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.572765 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/613faa0b-14df-452d-820e-1d3e589b183c-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.572802 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-lokistack-gateway\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.572988 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-tenants\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.573051 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/613faa0b-14df-452d-820e-1d3e589b183c-cloudkitty-lokistack-gateway-client-http\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: 
\"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.573102 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.573136 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-rbac\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.573159 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-cloudkitty-lokistack-gateway-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.573218 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds47c\" (UniqueName: \"kubernetes.io/projected/613faa0b-14df-452d-820e-1d3e589b183c-kube-api-access-ds47c\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.573270 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpfzc\" (UniqueName: \"kubernetes.io/projected/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-kube-api-access-vpfzc\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.573322 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.573341 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-rbac\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.573903 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-lokistack-gateway\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " 
pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.574352 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-cloudkitty-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.574394 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-cloudkitty-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.574426 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.574455 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-rbac\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.574517 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-cloudkitty-lokistack-gateway-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: E1210 11:05:17.574579 4682 secret.go:188] Couldn't get secret openstack/cloudkitty-lokistack-gateway-http: secret "cloudkitty-lokistack-gateway-http" not found Dec 10 11:05:17 crc kubenswrapper[4682]: E1210 11:05:17.574635 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-tls-secret podName:180ae48d-ecb1-4485-b26b-ebaed9cf17e9 nodeName:}" failed. No retries permitted until 2025-12-10 11:05:18.074617402 +0000 UTC m=+1198.394828242 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-tls-secret") pod "cloudkitty-lokistack-gateway-bc75944f-m4qwq" (UID: "180ae48d-ecb1-4485-b26b-ebaed9cf17e9") : secret "cloudkitty-lokistack-gateway-http" not found Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.574725 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.575443 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-cloudkitty-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.577027 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-tenants\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.580240 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-cloudkitty-lokistack-gateway-client-http\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.582716 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.591266 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpfzc\" (UniqueName: \"kubernetes.io/projected/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-kube-api-access-vpfzc\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.675374 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/613faa0b-14df-452d-820e-1d3e589b183c-cloudkitty-lokistack-gateway-client-http\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.675422 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.675461 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds47c\" (UniqueName: \"kubernetes.io/projected/613faa0b-14df-452d-820e-1d3e589b183c-kube-api-access-ds47c\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.675512 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-rbac\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.675565 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-cloudkitty-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.675602 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/613faa0b-14df-452d-820e-1d3e589b183c-tenants\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.675622 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-lokistack-gateway\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc 
kubenswrapper[4682]: I1210 11:05:17.675659 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-cloudkitty-lokistack-gateway-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.675680 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/613faa0b-14df-452d-820e-1d3e589b183c-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.677497 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-rbac\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.679023 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-lokistack-gateway\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.679746 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-cloudkitty-lokistack-gateway-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.680863 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/613faa0b-14df-452d-820e-1d3e589b183c-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.683254 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.685070 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/613faa0b-14df-452d-820e-1d3e589b183c-tenants\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.685309 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-gateway-client-http\" (UniqueName: 
\"kubernetes.io/secret/613faa0b-14df-452d-820e-1d3e589b183c-cloudkitty-lokistack-gateway-client-http\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.690956 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/613faa0b-14df-452d-820e-1d3e589b183c-cloudkitty-ca-bundle\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.698329 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds47c\" (UniqueName: \"kubernetes.io/projected/613faa0b-14df-452d-820e-1d3e589b183c-kube-api-access-ds47c\") pod \"cloudkitty-lokistack-gateway-bc75944f-dm6gz\" (UID: \"613faa0b-14df-452d-820e-1d3e589b183c\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:17 crc kubenswrapper[4682]: I1210 11:05:17.746596 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.083730 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.105363 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/180ae48d-ecb1-4485-b26b-ebaed9cf17e9-tls-secret\") pod \"cloudkitty-lokistack-gateway-bc75944f-m4qwq\" (UID: \"180ae48d-ecb1-4485-b26b-ebaed9cf17e9\") " pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.114740 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-ingester-0"] Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.116057 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.121884 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-ingester-0"] Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.146509 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-ingester-http" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.146944 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-ingester-grpc" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.185577 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-compactor-0"] Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.186833 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.189318 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-compactor-http" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.189530 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-compactor-grpc" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.192564 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-compactor-0"] Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.258548 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.265837 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-lokistack-index-gateway-0"] Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.267123 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.270664 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-index-gateway-http" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.280615 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-index-gateway-0"] Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.281081 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-lokistack-index-gateway-grpc" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.286967 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3952c83-e815-459a-bcef-7ab66596b7d2-config\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.287059 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/c3952c83-e815-459a-bcef-7ab66596b7d2-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.287125 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/c3952c83-e815-459a-bcef-7ab66596b7d2-cloudkitty-lokistack-compactor-grpc\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.287202 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.287234 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"cloudkitty-lokistack-compactor-http\" (UniqueName: \"kubernetes.io/secret/c3952c83-e815-459a-bcef-7ab66596b7d2-cloudkitty-lokistack-compactor-http\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.287283 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-cloudkitty-lokistack-ingester-grpc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.287333 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.287404 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ingester-http\" (UniqueName: \"kubernetes.io/secret/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-cloudkitty-lokistack-ingester-http\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.287499 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.287545 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3952c83-e815-459a-bcef-7ab66596b7d2-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.287598 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pk2xp\" (UniqueName: \"kubernetes.io/projected/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-kube-api-access-pk2xp\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.287621 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.287685 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-config\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.287782 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhz7k\" (UniqueName: \"kubernetes.io/projected/c3952c83-e815-459a-bcef-7ab66596b7d2-kube-api-access-jhz7k\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.287843 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389501 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389558 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389579 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-compactor-http\" (UniqueName: \"kubernetes.io/secret/c3952c83-e815-459a-bcef-7ab66596b7d2-cloudkitty-lokistack-compactor-http\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389636 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-cloudkitty-lokistack-ingester-grpc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389654 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-cloudkitty-lokistack-index-gateway-http\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389712 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " 
pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389740 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389789 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ingester-http\" (UniqueName: \"kubernetes.io/secret/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-cloudkitty-lokistack-ingester-http\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389813 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-config\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389835 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389855 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-lokistack-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-cloudkitty-lokistack-index-gateway-grpc\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389874 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3952c83-e815-459a-bcef-7ab66596b7d2-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389889 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pk2xp\" (UniqueName: \"kubernetes.io/projected/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-kube-api-access-pk2xp\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389906 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389938 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-config\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.389964 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.390000 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mt5st\" (UniqueName: \"kubernetes.io/projected/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-kube-api-access-mt5st\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.390031 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhz7k\" (UniqueName: \"kubernetes.io/projected/c3952c83-e815-459a-bcef-7ab66596b7d2-kube-api-access-jhz7k\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.390048 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.390077 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3952c83-e815-459a-bcef-7ab66596b7d2-config\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.390101 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/c3952c83-e815-459a-bcef-7ab66596b7d2-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.390121 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/c3952c83-e815-459a-bcef-7ab66596b7d2-cloudkitty-lokistack-compactor-grpc\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.390880 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 
11:05:18.391525 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c3952c83-e815-459a-bcef-7ab66596b7d2-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.392017 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.392451 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-config\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.394340 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3952c83-e815-459a-bcef-7ab66596b7d2-config\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.394647 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.395793 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-compactor-http\" (UniqueName: \"kubernetes.io/secret/c3952c83-e815-459a-bcef-7ab66596b7d2-cloudkitty-lokistack-compactor-http\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.396007 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/c3952c83-e815-459a-bcef-7ab66596b7d2-cloudkitty-lokistack-compactor-grpc\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.396452 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.401563 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") 
" pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.408602 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/c3952c83-e815-459a-bcef-7ab66596b7d2-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.408632 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ingester-http\" (UniqueName: \"kubernetes.io/secret/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-cloudkitty-lokistack-ingester-http\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.414296 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pk2xp\" (UniqueName: \"kubernetes.io/projected/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-kube-api-access-pk2xp\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.418247 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/ea1f94a0-5b00-4aac-85ae-f7af9df196b6-cloudkitty-lokistack-ingester-grpc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.429050 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhz7k\" (UniqueName: \"kubernetes.io/projected/c3952c83-e815-459a-bcef-7ab66596b7d2-kube-api-access-jhz7k\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.429835 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.435767 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"cloudkitty-lokistack-ingester-0\" (UID: \"ea1f94a0-5b00-4aac-85ae-f7af9df196b6\") " pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.436550 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"cloudkitty-lokistack-compactor-0\" (UID: \"c3952c83-e815-459a-bcef-7ab66596b7d2\") " pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.479363 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.495268 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-config\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.495361 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-cloudkitty-lokistack-index-gateway-grpc\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.496304 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.496377 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt5st\" (UniqueName: \"kubernetes.io/projected/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-kube-api-access-mt5st\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.496598 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.496634 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-cloudkitty-lokistack-index-gateway-http\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.496648 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-config\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.496678 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.497337 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"cloudkitty-lokistack-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-cloudkitty-lokistack-ca-bundle\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.498520 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.500661 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-cloudkitty-lokistack-index-gateway-grpc\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.505256 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.509299 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-lokistack-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-cloudkitty-lokistack-index-gateway-http\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.511392 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cloudkitty-loki-s3\" (UniqueName: \"kubernetes.io/secret/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-cloudkitty-loki-s3\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.516271 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mt5st\" (UniqueName: \"kubernetes.io/projected/4e20f0f2-e92b-4915-a6c8-cff3c50773fc-kube-api-access-mt5st\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.548865 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"cloudkitty-lokistack-index-gateway-0\" (UID: \"4e20f0f2-e92b-4915-a6c8-cff3c50773fc\") " pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:18 crc kubenswrapper[4682]: I1210 11:05:18.626923 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:23 crc kubenswrapper[4682]: E1210 11:05:23.678459 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 10 11:05:23 crc kubenswrapper[4682]: E1210 11:05:23.679167 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qkrgx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-j6d45_openstack(bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:05:23 crc kubenswrapper[4682]: E1210 11:05:23.680397 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" podUID="bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8" Dec 10 11:05:23 crc kubenswrapper[4682]: E1210 11:05:23.722746 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 10 11:05:23 crc kubenswrapper[4682]: E1210 11:05:23.722951 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vsvhq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-9jwk9_openstack(4d54a509-3920-43fa-b916-76ca2c0366ff): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:05:23 crc kubenswrapper[4682]: E1210 11:05:23.724089 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-9jwk9" podUID="4d54a509-3920-43fa-b916-76ca2c0366ff" Dec 10 11:05:23 crc kubenswrapper[4682]: E1210 11:05:23.832183 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 10 11:05:23 crc kubenswrapper[4682]: E1210 11:05:23.832337 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wvbkj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-x9mcf_openstack(0dd38c46-9cd1-4ee9-921a-956470e90539): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:05:23 crc kubenswrapper[4682]: E1210 11:05:23.833547 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" podUID="0dd38c46-9cd1-4ee9-921a-956470e90539" Dec 10 11:05:24 crc kubenswrapper[4682]: E1210 11:05:24.124675 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" podUID="bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8" Dec 10 11:05:24 crc kubenswrapper[4682]: E1210 11:05:24.854880 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 10 11:05:24 crc kubenswrapper[4682]: E1210 11:05:24.855049 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bdzg4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-lns5b_openstack(5b4961c9-bbce-41a1-815e-9e953082a574): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:05:24 crc kubenswrapper[4682]: E1210 11:05:24.856364 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" podUID="5b4961c9-bbce-41a1-815e-9e953082a574" Dec 10 11:05:25 crc kubenswrapper[4682]: E1210 11:05:25.184527 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" podUID="5b4961c9-bbce-41a1-815e-9e953082a574" Dec 10 11:05:26 crc kubenswrapper[4682]: E1210 11:05:26.535553 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Dec 10 11:05:26 crc kubenswrapper[4682]: E1210 11:05:26.536032 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qhh5c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(1b6b3db6-e7bd-4c87-a35a-1f398c40436e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:05:26 crc kubenswrapper[4682]: E1210 11:05:26.537234 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="1b6b3db6-e7bd-4c87-a35a-1f398c40436e" Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.653478 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-9jwk9" Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.661905 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.778238 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0dd38c46-9cd1-4ee9-921a-956470e90539-dns-svc\") pod \"0dd38c46-9cd1-4ee9-921a-956470e90539\" (UID: \"0dd38c46-9cd1-4ee9-921a-956470e90539\") " Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.778426 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d54a509-3920-43fa-b916-76ca2c0366ff-config\") pod \"4d54a509-3920-43fa-b916-76ca2c0366ff\" (UID: \"4d54a509-3920-43fa-b916-76ca2c0366ff\") " Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.778491 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dd38c46-9cd1-4ee9-921a-956470e90539-config\") pod \"0dd38c46-9cd1-4ee9-921a-956470e90539\" (UID: \"0dd38c46-9cd1-4ee9-921a-956470e90539\") " Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.778529 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vsvhq\" (UniqueName: \"kubernetes.io/projected/4d54a509-3920-43fa-b916-76ca2c0366ff-kube-api-access-vsvhq\") pod \"4d54a509-3920-43fa-b916-76ca2c0366ff\" (UID: \"4d54a509-3920-43fa-b916-76ca2c0366ff\") " Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.778572 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvbkj\" (UniqueName: \"kubernetes.io/projected/0dd38c46-9cd1-4ee9-921a-956470e90539-kube-api-access-wvbkj\") pod \"0dd38c46-9cd1-4ee9-921a-956470e90539\" (UID: \"0dd38c46-9cd1-4ee9-921a-956470e90539\") " Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.778764 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0dd38c46-9cd1-4ee9-921a-956470e90539-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0dd38c46-9cd1-4ee9-921a-956470e90539" (UID: "0dd38c46-9cd1-4ee9-921a-956470e90539"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.779028 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d54a509-3920-43fa-b916-76ca2c0366ff-config" (OuterVolumeSpecName: "config") pod "4d54a509-3920-43fa-b916-76ca2c0366ff" (UID: "4d54a509-3920-43fa-b916-76ca2c0366ff"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.779052 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0dd38c46-9cd1-4ee9-921a-956470e90539-config" (OuterVolumeSpecName: "config") pod "0dd38c46-9cd1-4ee9-921a-956470e90539" (UID: "0dd38c46-9cd1-4ee9-921a-956470e90539"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.779944 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d54a509-3920-43fa-b916-76ca2c0366ff-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.779969 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dd38c46-9cd1-4ee9-921a-956470e90539-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.779983 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0dd38c46-9cd1-4ee9-921a-956470e90539-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.783940 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dd38c46-9cd1-4ee9-921a-956470e90539-kube-api-access-wvbkj" (OuterVolumeSpecName: "kube-api-access-wvbkj") pod "0dd38c46-9cd1-4ee9-921a-956470e90539" (UID: "0dd38c46-9cd1-4ee9-921a-956470e90539"). InnerVolumeSpecName "kube-api-access-wvbkj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.795011 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d54a509-3920-43fa-b916-76ca2c0366ff-kube-api-access-vsvhq" (OuterVolumeSpecName: "kube-api-access-vsvhq") pod "4d54a509-3920-43fa-b916-76ca2c0366ff" (UID: "4d54a509-3920-43fa-b916-76ca2c0366ff"). InnerVolumeSpecName "kube-api-access-vsvhq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.882166 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vsvhq\" (UniqueName: \"kubernetes.io/projected/4d54a509-3920-43fa-b916-76ca2c0366ff-kube-api-access-vsvhq\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:26 crc kubenswrapper[4682]: I1210 11:05:26.882207 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvbkj\" (UniqueName: \"kubernetes.io/projected/0dd38c46-9cd1-4ee9-921a-956470e90539-kube-api-access-wvbkj\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:27 crc kubenswrapper[4682]: I1210 11:05:27.200012 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" event={"ID":"0dd38c46-9cd1-4ee9-921a-956470e90539","Type":"ContainerDied","Data":"7774409bed762879a024a4638b5e7018584d622d9f5fe92ac487a01b0b8f9061"} Dec 10 11:05:27 crc kubenswrapper[4682]: I1210 11:05:27.200033 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-x9mcf" Dec 10 11:05:27 crc kubenswrapper[4682]: I1210 11:05:27.202260 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-9jwk9" event={"ID":"4d54a509-3920-43fa-b916-76ca2c0366ff","Type":"ContainerDied","Data":"47f32cc3dfc36254a443eff838df12653d0f64de6566ad5c26f697aad8117728"} Dec 10 11:05:27 crc kubenswrapper[4682]: I1210 11:05:27.202362 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-9jwk9" Dec 10 11:05:27 crc kubenswrapper[4682]: E1210 11:05:27.211210 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="1b6b3db6-e7bd-4c87-a35a-1f398c40436e" Dec 10 11:05:27 crc kubenswrapper[4682]: I1210 11:05:27.274629 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-x9mcf"] Dec 10 11:05:27 crc kubenswrapper[4682]: I1210 11:05:27.285607 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-x9mcf"] Dec 10 11:05:27 crc kubenswrapper[4682]: I1210 11:05:27.306592 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-9jwk9"] Dec 10 11:05:27 crc kubenswrapper[4682]: I1210 11:05:27.313226 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-9jwk9"] Dec 10 11:05:28 crc kubenswrapper[4682]: I1210 11:05:28.404563 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dd38c46-9cd1-4ee9-921a-956470e90539" path="/var/lib/kubelet/pods/0dd38c46-9cd1-4ee9-921a-956470e90539/volumes" Dec 10 11:05:28 crc kubenswrapper[4682]: I1210 11:05:28.408783 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d54a509-3920-43fa-b916-76ca2c0366ff" path="/var/lib/kubelet/pods/4d54a509-3920-43fa-b916-76ca2c0366ff/volumes" Dec 10 11:05:28 crc kubenswrapper[4682]: I1210 11:05:28.764615 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-4b2ch"] Dec 10 11:05:28 crc kubenswrapper[4682]: I1210 11:05:28.837863 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c"] Dec 10 11:05:28 crc kubenswrapper[4682]: I1210 11:05:28.845039 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:05:28 crc kubenswrapper[4682]: W1210 11:05:28.873754 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd67c4ad0_1464_4f7f_9877_8601f9b2c3b0.slice/crio-41a7f696dce11586a37cad24c828eb6441092497c1ed4aed4c1fcd770e20cc12 WatchSource:0}: Error finding container 41a7f696dce11586a37cad24c828eb6441092497c1ed4aed4c1fcd770e20cc12: Status 404 returned error can't find the container with id 41a7f696dce11586a37cad24c828eb6441092497c1ed4aed4c1fcd770e20cc12 Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.136216 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-compactor-0"] Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.150057 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz"] Dec 10 11:05:29 crc kubenswrapper[4682]: W1210 11:05:29.156497 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc3952c83_e815_459a_bcef_7ab66596b7d2.slice/crio-da4e6a541e94c539490c110d9b43a683322b5c74dada34ae7bc3a2e9dd35e30e WatchSource:0}: Error finding container da4e6a541e94c539490c110d9b43a683322b5c74dada34ae7bc3a2e9dd35e30e: Status 404 returned error can't find the container with id da4e6a541e94c539490c110d9b43a683322b5c74dada34ae7bc3a2e9dd35e30e 
Dec 10 11:05:29 crc kubenswrapper[4682]: W1210 11:05:29.175388 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea1f94a0_5b00_4aac_85ae_f7af9df196b6.slice/crio-44b763c8d82d5a1fe9b664f9baff8da243ff9af8b56c81c3bbbe537e99fdf755 WatchSource:0}: Error finding container 44b763c8d82d5a1fe9b664f9baff8da243ff9af8b56c81c3bbbe537e99fdf755: Status 404 returned error can't find the container with id 44b763c8d82d5a1fe9b664f9baff8da243ff9af8b56c81c3bbbe537e99fdf755 Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.177664 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-ingester-0"] Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.188272 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-w7jxw"] Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.214914 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.226333 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0","Type":"ContainerStarted","Data":"41a7f696dce11586a37cad24c828eb6441092497c1ed4aed4c1fcd770e20cc12"} Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.229552 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-compactor-0" event={"ID":"c3952c83-e815-459a-bcef-7ab66596b7d2","Type":"ContainerStarted","Data":"da4e6a541e94c539490c110d9b43a683322b5c74dada34ae7bc3a2e9dd35e30e"} Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.231872 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-ingester-0" event={"ID":"ea1f94a0-5b00-4aac-85ae-f7af9df196b6","Type":"ContainerStarted","Data":"44b763c8d82d5a1fe9b664f9baff8da243ff9af8b56c81c3bbbe537e99fdf755"} Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.232676 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-4b2ch" event={"ID":"8ee7ede4-07ea-4b15-88e7-15477c99d5ab","Type":"ContainerStarted","Data":"de5ff69f8bf9c2fac927fb658ca07a29aa7b5eb1c842ca8cfe4537eebd2faec2"} Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.233809 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"55723944-339e-4ed6-9159-9696ca1debeb","Type":"ContainerStarted","Data":"f21e2a0f9c51c0aac8e619e07dbaaa439c8b7cfda6b2b580046560637fdec851"} Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.235289 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" event={"ID":"6e286958-b529-4f19-b8e3-164e6fe16e70","Type":"ContainerStarted","Data":"abd4171c0c1ff37684165589d02f18e64657b5c319ceac8dd84c01b34b30a932"} Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.237406 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-w7jxw" event={"ID":"df9d7d76-fa02-41c5-b652-ea9b7b00bd00","Type":"ContainerStarted","Data":"7de02fe044d7b8093e70623f7fa0785973186f11471ea2f0377b6fd1a32a1ac7"} Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.238736 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" 
event={"ID":"31331344-4f2b-497d-9683-ea3e235bf0df","Type":"ContainerStarted","Data":"6322878287ce199e599c92d866564d7dc751a020f914e7a424ad862311c69fe2"} Dec 10 11:05:29 crc kubenswrapper[4682]: W1210 11:05:29.270361 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod13b6a06f_420a_420d_8a7c_5a80d312ec79.slice/crio-25a55614701833759445c37361b5aa419c2f26ceec61818d6a31555be8a3aedf WatchSource:0}: Error finding container 25a55614701833759445c37361b5aa419c2f26ceec61818d6a31555be8a3aedf: Status 404 returned error can't find the container with id 25a55614701833759445c37361b5aa419c2f26ceec61818d6a31555be8a3aedf Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.274161 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 10 11:05:29 crc kubenswrapper[4682]: W1210 11:05:29.460391 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod180ae48d_ecb1_4485_b26b_ebaed9cf17e9.slice/crio-4e0d803c49e4ebd22e927e3e90e067794992dd52a7fb49e4b2dbb43d7c81e747 WatchSource:0}: Error finding container 4e0d803c49e4ebd22e927e3e90e067794992dd52a7fb49e4b2dbb43d7c81e747: Status 404 returned error can't find the container with id 4e0d803c49e4ebd22e927e3e90e067794992dd52a7fb49e4b2dbb43d7c81e747 Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.466735 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq"] Dec 10 11:05:29 crc kubenswrapper[4682]: W1210 11:05:29.487302 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4e20f0f2_e92b_4915_a6c8_cff3c50773fc.slice/crio-c3c0715f72f6b583f53c3ff2a9e694faeca05dd19daf9cdf3c06c34d2661bca8 WatchSource:0}: Error finding container c3c0715f72f6b583f53c3ff2a9e694faeca05dd19daf9cdf3c06c34d2661bca8: Status 404 returned error can't find the container with id c3c0715f72f6b583f53c3ff2a9e694faeca05dd19daf9cdf3c06c34d2661bca8 Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.488839 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-index-gateway-0"] Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.524369 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz"] Dec 10 11:05:29 crc kubenswrapper[4682]: W1210 11:05:29.531410 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod613faa0b_14df_452d_820e_1d3e589b183c.slice/crio-b27fed4534dbd53f99538be68976a061581bf3d47450b78d13a296afa1481d59 WatchSource:0}: Error finding container b27fed4534dbd53f99538be68976a061581bf3d47450b78d13a296afa1481d59: Status 404 returned error can't find the container with id b27fed4534dbd53f99538be68976a061581bf3d47450b78d13a296afa1481d59 Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.536836 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz"] Dec 10 11:05:29 crc kubenswrapper[4682]: W1210 11:05:29.544613 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74c8133d_aa41_4891_8a66_fafa28cfd141.slice/crio-4ce7d96a9e55818bb8f550f46851d13c86d8ff9af77a1b55ea7f9e0e37937ce4 WatchSource:0}: Error finding container 
4ce7d96a9e55818bb8f550f46851d13c86d8ff9af77a1b55ea7f9e0e37937ce4: Status 404 returned error can't find the container with id 4ce7d96a9e55818bb8f550f46851d13c86d8ff9af77a1b55ea7f9e0e37937ce4 Dec 10 11:05:29 crc kubenswrapper[4682]: E1210 11:05:29.549822 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Dec 10 11:05:29 crc kubenswrapper[4682]: E1210 11:05:29.550020 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ld6l6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(d9f85710-54c3-4f30-88f6-bb97f9a200e8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:05:29 crc kubenswrapper[4682]: E1210 11:05:29.550239 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:loki-distributor,Image:registry.redhat.io/openshift-logging/logging-loki-rhel9@sha256:06b83c3cbf0c5db4dd9812e046ca14189d18cf7b3c7f2f2c37aa705cc5f5deb7,Command:[],Args:[-target=distributor -config.file=/etc/loki/config/config.yaml -runtime-config.file=/etc/loki/config/runtime-config.yaml 
-config.expand-env=true],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:3100,Protocol:TCP,HostIP:,},ContainerPort{Name:grpclb,HostPort:0,ContainerPort:9095,Protocol:TCP,HostIP:,},ContainerPort{Name:gossip-ring,HostPort:0,ContainerPort:7946,Protocol:TCP,HostIP:,},ContainerPort{Name:healthchecks,HostPort:0,ContainerPort:3101,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/loki/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cloudkitty-lokistack-distributor-http,ReadOnly:false,MountPath:/var/run/tls/http/server,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cloudkitty-lokistack-distributor-grpc,ReadOnly:false,MountPath:/var/run/tls/grpc/server,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cloudkitty-lokistack-ca-bundle,ReadOnly:false,MountPath:/var/run/ca,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8z6h7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/loki/api/v1/status/buildinfo,Port:{0 3101 },Host:,Scheme:HTTPS,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:2,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:10,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{0 3101 },Host:,Scheme:HTTPS,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-lokistack-distributor-664b687b54-w4wxz_openstack(74c8133d-aa41-4891-8a66-fafa28cfd141): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 11:05:29 crc kubenswrapper[4682]: E1210 11:05:29.551744 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="d9f85710-54c3-4f30-88f6-bb97f9a200e8" Dec 10 11:05:29 crc kubenswrapper[4682]: E1210 11:05:29.551755 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"loki-distributor\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" podUID="74c8133d-aa41-4891-8a66-fafa28cfd141" Dec 10 11:05:29 crc kubenswrapper[4682]: I1210 11:05:29.562311 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/ovsdbserver-nb-0"] Dec 10 11:05:29 crc kubenswrapper[4682]: W1210 11:05:29.574307 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod97a3c791_f1b7_4665_ae8b_fa87d1ee73e1.slice/crio-a0a20b7b2bfe7ff9f593328ef24feaafd2d88e2aafe31623616475d6f9e1e3d3 WatchSource:0}: Error finding container a0a20b7b2bfe7ff9f593328ef24feaafd2d88e2aafe31623616475d6f9e1e3d3: Status 404 returned error can't find the container with id a0a20b7b2bfe7ff9f593328ef24feaafd2d88e2aafe31623616475d6f9e1e3d3 Dec 10 11:05:30 crc kubenswrapper[4682]: I1210 11:05:30.258812 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" event={"ID":"613faa0b-14df-452d-820e-1d3e589b183c","Type":"ContainerStarted","Data":"b27fed4534dbd53f99538be68976a061581bf3d47450b78d13a296afa1481d59"} Dec 10 11:05:30 crc kubenswrapper[4682]: I1210 11:05:30.261943 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"9d4f095c608a9033903a024629d6bdbb8e05d5ec10f831b06e26d70cfeb1c556"} Dec 10 11:05:30 crc kubenswrapper[4682]: I1210 11:05:30.263366 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" event={"ID":"74c8133d-aa41-4891-8a66-fafa28cfd141","Type":"ContainerStarted","Data":"4ce7d96a9e55818bb8f550f46851d13c86d8ff9af77a1b55ea7f9e0e37937ce4"} Dec 10 11:05:30 crc kubenswrapper[4682]: I1210 11:05:30.265904 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"13b6a06f-420a-420d-8a7c-5a80d312ec79","Type":"ContainerStarted","Data":"25a55614701833759445c37361b5aa419c2f26ceec61818d6a31555be8a3aedf"} Dec 10 11:05:30 crc kubenswrapper[4682]: E1210 11:05:30.265945 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"loki-distributor\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift-logging/logging-loki-rhel9@sha256:06b83c3cbf0c5db4dd9812e046ca14189d18cf7b3c7f2f2c37aa705cc5f5deb7\\\"\"" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" podUID="74c8133d-aa41-4891-8a66-fafa28cfd141" Dec 10 11:05:30 crc kubenswrapper[4682]: I1210 11:05:30.267278 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1","Type":"ContainerStarted","Data":"a0a20b7b2bfe7ff9f593328ef24feaafd2d88e2aafe31623616475d6f9e1e3d3"} Dec 10 11:05:30 crc kubenswrapper[4682]: I1210 11:05:30.269972 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"63a61a31-230d-455f-b27a-87760ae46c25","Type":"ContainerStarted","Data":"f1c7a591ea8e8efbcffaa113f2133191d970c6efe3255fbba026e5389850ba25"} Dec 10 11:05:30 crc kubenswrapper[4682]: I1210 11:05:30.270115 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Dec 10 11:05:30 crc kubenswrapper[4682]: I1210 11:05:30.272623 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" event={"ID":"180ae48d-ecb1-4485-b26b-ebaed9cf17e9","Type":"ContainerStarted","Data":"4e0d803c49e4ebd22e927e3e90e067794992dd52a7fb49e4b2dbb43d7c81e747"} Dec 10 11:05:30 crc kubenswrapper[4682]: I1210 11:05:30.273917 4682 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/cloudkitty-lokistack-index-gateway-0" event={"ID":"4e20f0f2-e92b-4915-a6c8-cff3c50773fc","Type":"ContainerStarted","Data":"c3c0715f72f6b583f53c3ff2a9e694faeca05dd19daf9cdf3c06c34d2661bca8"} Dec 10 11:05:30 crc kubenswrapper[4682]: E1210 11:05:30.275426 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="d9f85710-54c3-4f30-88f6-bb97f9a200e8" Dec 10 11:05:30 crc kubenswrapper[4682]: I1210 11:05:30.314741 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=3.097770821 podStartE2EDuration="27.314721934s" podCreationTimestamp="2025-12-10 11:05:03 +0000 UTC" firstStartedPulling="2025-12-10 11:05:04.7372814 +0000 UTC m=+1185.057492140" lastFinishedPulling="2025-12-10 11:05:28.954232503 +0000 UTC m=+1209.274443253" observedRunningTime="2025-12-10 11:05:30.309456286 +0000 UTC m=+1210.629667086" watchObservedRunningTime="2025-12-10 11:05:30.314721934 +0000 UTC m=+1210.634932674" Dec 10 11:05:31 crc kubenswrapper[4682]: I1210 11:05:31.284843 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7362d622-686c-48e5-b0de-562fae10bc35","Type":"ContainerStarted","Data":"d334adfb35be27b46279f9611f45e3a210c4fdbf44ceb555dadc28eb89ea99ae"} Dec 10 11:05:31 crc kubenswrapper[4682]: I1210 11:05:31.287751 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c211ac37-0b53-466f-ad83-7062f681c32b","Type":"ContainerStarted","Data":"ffbe3adca9c0c62209b7671ab439ea1eb3795294266508f5997585d6ef992d4b"} Dec 10 11:05:31 crc kubenswrapper[4682]: E1210 11:05:31.289761 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"loki-distributor\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift-logging/logging-loki-rhel9@sha256:06b83c3cbf0c5db4dd9812e046ca14189d18cf7b3c7f2f2c37aa705cc5f5deb7\\\"\"" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" podUID="74c8133d-aa41-4891-8a66-fafa28cfd141" Dec 10 11:05:38 crc kubenswrapper[4682]: I1210 11:05:38.357399 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-ingester-0" event={"ID":"ea1f94a0-5b00-4aac-85ae-f7af9df196b6","Type":"ContainerStarted","Data":"cf58b0dedadd7e13ed3f343f20b6101408215f86d6a048bbcc24b5968fb31928"} Dec 10 11:05:38 crc kubenswrapper[4682]: I1210 11:05:38.358704 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:05:38 crc kubenswrapper[4682]: I1210 11:05:38.375746 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-ingester-0" podStartSLOduration=13.364256042 podStartE2EDuration="21.375732936s" podCreationTimestamp="2025-12-10 11:05:17 +0000 UTC" firstStartedPulling="2025-12-10 11:05:29.179964523 +0000 UTC m=+1209.500175273" lastFinishedPulling="2025-12-10 11:05:37.191441427 +0000 UTC m=+1217.511652167" observedRunningTime="2025-12-10 11:05:38.374764755 +0000 UTC m=+1218.694975525" watchObservedRunningTime="2025-12-10 11:05:38.375732936 +0000 UTC m=+1218.695943676" Dec 10 11:05:38 crc kubenswrapper[4682]: I1210 11:05:38.560625 4682 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/memcached-0" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.366383 4682 generic.go:334] "Generic (PLEG): container finished" podID="bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8" containerID="ccaf461857083b4af5f1e513a4962ed287e2efd7f9c51889f6188b6ad28e59bc" exitCode=0 Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.366481 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" event={"ID":"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8","Type":"ContainerDied","Data":"ccaf461857083b4af5f1e513a4962ed287e2efd7f9c51889f6188b6ad28e59bc"} Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.369440 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"13b6a06f-420a-420d-8a7c-5a80d312ec79","Type":"ContainerStarted","Data":"88262d0da73fd783b0a42c4f9d77a606a49b9d843f8b1b5aa53189bb5ab203f9"} Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.371716 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" event={"ID":"6e286958-b529-4f19-b8e3-164e6fe16e70","Type":"ContainerStarted","Data":"b04c2b3d6fa526bfb827b3ad5f52170fe62079b01c440b5886c2c88a7df237d4"} Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.371836 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.373663 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"af0ab072-8822-403b-ac67-8689937752bd","Type":"ContainerStarted","Data":"52324fb463d9374fe418fc5517f460fb14e8b91050922a1ac5fb777a5aed90c6"} Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.373823 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.376439 4682 generic.go:334] "Generic (PLEG): container finished" podID="8ee7ede4-07ea-4b15-88e7-15477c99d5ab" containerID="132fac2f8e9050e3eb9a57f7d5885a7fd15ed75e94f26451eafb9824389b086b" exitCode=0 Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.376519 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-4b2ch" event={"ID":"8ee7ede4-07ea-4b15-88e7-15477c99d5ab","Type":"ContainerDied","Data":"132fac2f8e9050e3eb9a57f7d5885a7fd15ed75e94f26451eafb9824389b086b"} Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.379392 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" event={"ID":"613faa0b-14df-452d-820e-1d3e589b183c","Type":"ContainerStarted","Data":"4546fecbf1d7870ef47c431ba9aaef90f141eb07abd2835387bf8b79ecf6db56"} Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.379803 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.386109 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-w7jxw" event={"ID":"df9d7d76-fa02-41c5-b652-ea9b7b00bd00","Type":"ContainerStarted","Data":"7efeb88b003dc83236c4747886f0dd02b71ca13aa5b29ab0d11be867e6aa0781"} Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.386503 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-w7jxw" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 
11:05:39.389297 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" event={"ID":"31331344-4f2b-497d-9683-ea3e235bf0df","Type":"ContainerStarted","Data":"0468291b85db552863139f4d92a7430d94b26da82dcfe1792ffaea924d894bab"} Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.389920 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.391577 4682 generic.go:334] "Generic (PLEG): container finished" podID="5b4961c9-bbce-41a1-815e-9e953082a574" containerID="971bd5f6a593f1bd2bf06d169c545e8e8812c95d5249a092d7ce9755f6c61c16" exitCode=0 Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.391634 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" event={"ID":"5b4961c9-bbce-41a1-815e-9e953082a574","Type":"ContainerDied","Data":"971bd5f6a593f1bd2bf06d169c545e8e8812c95d5249a092d7ce9755f6c61c16"} Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.393447 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-index-gateway-0" event={"ID":"4e20f0f2-e92b-4915-a6c8-cff3c50773fc","Type":"ContainerStarted","Data":"ab049f6aaa1660a6f2ba2598425eed6546d5681614c2a39c5b99bb1adfde7842"} Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.394314 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.395755 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-compactor-0" event={"ID":"c3952c83-e815-459a-bcef-7ab66596b7d2","Type":"ContainerStarted","Data":"fc0ce6ad5419203206b64bdae20e7639f577765271697c476cd8f4902d44698e"} Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.395832 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.397274 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1","Type":"ContainerStarted","Data":"2a1983d5a074122dc612dcd41e226e3d27992137d7e951cbaacdc6ef1b298387"} Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.400693 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=9.219200525 podStartE2EDuration="35.400681559s" podCreationTimestamp="2025-12-10 11:05:04 +0000 UTC" firstStartedPulling="2025-12-10 11:05:11.071292365 +0000 UTC m=+1191.391503115" lastFinishedPulling="2025-12-10 11:05:37.252773399 +0000 UTC m=+1217.572984149" observedRunningTime="2025-12-10 11:05:39.398913204 +0000 UTC m=+1219.719123974" watchObservedRunningTime="2025-12-10 11:05:39.400681559 +0000 UTC m=+1219.720892309" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.403311 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" event={"ID":"180ae48d-ecb1-4485-b26b-ebaed9cf17e9","Type":"ContainerStarted","Data":"9a4df10e399a86f86d5050fb31d77f3a1a073a5240d03b390c651b5cc6168723"} Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.404111 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:39 crc kubenswrapper[4682]: 
I1210 11:05:39.408821 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.420296 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.425377 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-dm6gz" podStartSLOduration=14.701726603000001 podStartE2EDuration="22.425356101s" podCreationTimestamp="2025-12-10 11:05:17 +0000 UTC" firstStartedPulling="2025-12-10 11:05:29.536689871 +0000 UTC m=+1209.856900621" lastFinishedPulling="2025-12-10 11:05:37.260319359 +0000 UTC m=+1217.580530119" observedRunningTime="2025-12-10 11:05:39.417907115 +0000 UTC m=+1219.738117875" watchObservedRunningTime="2025-12-10 11:05:39.425356101 +0000 UTC m=+1219.745566851" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.507968 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" podStartSLOduration=14.169609541 podStartE2EDuration="22.507955037s" podCreationTimestamp="2025-12-10 11:05:17 +0000 UTC" firstStartedPulling="2025-12-10 11:05:28.953925084 +0000 UTC m=+1209.274135834" lastFinishedPulling="2025-12-10 11:05:37.29227057 +0000 UTC m=+1217.612481330" observedRunningTime="2025-12-10 11:05:39.506347216 +0000 UTC m=+1219.826557966" watchObservedRunningTime="2025-12-10 11:05:39.507955037 +0000 UTC m=+1219.828165787" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.542635 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-gateway-bc75944f-m4qwq" podStartSLOduration=15.053650748999999 podStartE2EDuration="22.542615834s" podCreationTimestamp="2025-12-10 11:05:17 +0000 UTC" firstStartedPulling="2025-12-10 11:05:29.463183943 +0000 UTC m=+1209.783394693" lastFinishedPulling="2025-12-10 11:05:36.952149028 +0000 UTC m=+1217.272359778" observedRunningTime="2025-12-10 11:05:39.536973836 +0000 UTC m=+1219.857184606" watchObservedRunningTime="2025-12-10 11:05:39.542615834 +0000 UTC m=+1219.862826584" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.563403 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-compactor-0" podStartSLOduration=14.432966942 podStartE2EDuration="22.563387413s" podCreationTimestamp="2025-12-10 11:05:17 +0000 UTC" firstStartedPulling="2025-12-10 11:05:29.161295542 +0000 UTC m=+1209.481506292" lastFinishedPulling="2025-12-10 11:05:37.291716013 +0000 UTC m=+1217.611926763" observedRunningTime="2025-12-10 11:05:39.554669246 +0000 UTC m=+1219.874879996" watchObservedRunningTime="2025-12-10 11:05:39.563387413 +0000 UTC m=+1219.883598153" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.582719 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" podStartSLOduration=14.502550995 podStartE2EDuration="22.582698084s" podCreationTimestamp="2025-12-10 11:05:17 +0000 UTC" firstStartedPulling="2025-12-10 11:05:29.173177918 +0000 UTC m=+1209.493388668" lastFinishedPulling="2025-12-10 11:05:37.253325007 +0000 UTC m=+1217.573535757" observedRunningTime="2025-12-10 11:05:39.579534934 +0000 UTC m=+1219.899745694" watchObservedRunningTime="2025-12-10 11:05:39.582698084 
+0000 UTC m=+1219.902908834" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.609512 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-w7jxw" podStartSLOduration=22.501395018 podStartE2EDuration="30.609461921s" podCreationTimestamp="2025-12-10 11:05:09 +0000 UTC" firstStartedPulling="2025-12-10 11:05:29.188480073 +0000 UTC m=+1209.508690823" lastFinishedPulling="2025-12-10 11:05:37.296546976 +0000 UTC m=+1217.616757726" observedRunningTime="2025-12-10 11:05:39.605975532 +0000 UTC m=+1219.926186282" watchObservedRunningTime="2025-12-10 11:05:39.609461921 +0000 UTC m=+1219.929672671" Dec 10 11:05:39 crc kubenswrapper[4682]: I1210 11:05:39.650717 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-index-gateway-0" podStartSLOduration=14.851523638 podStartE2EDuration="22.650701748s" podCreationTimestamp="2025-12-10 11:05:17 +0000 UTC" firstStartedPulling="2025-12-10 11:05:29.49118756 +0000 UTC m=+1209.811398310" lastFinishedPulling="2025-12-10 11:05:37.29036568 +0000 UTC m=+1217.610576420" observedRunningTime="2025-12-10 11:05:39.649362635 +0000 UTC m=+1219.969573405" watchObservedRunningTime="2025-12-10 11:05:39.650701748 +0000 UTC m=+1219.970912488" Dec 10 11:05:40 crc kubenswrapper[4682]: I1210 11:05:40.430045 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" event={"ID":"5b4961c9-bbce-41a1-815e-9e953082a574","Type":"ContainerStarted","Data":"9a158f5bf1938daa535c83a2ce93ac138d7659ab4241f120dba9bc815b5b787e"} Dec 10 11:05:40 crc kubenswrapper[4682]: I1210 11:05:40.431406 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:05:40 crc kubenswrapper[4682]: I1210 11:05:40.438615 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"55723944-339e-4ed6-9159-9696ca1debeb","Type":"ContainerStarted","Data":"7f2d34c5d2079a5a8d5e76c77db742611439685d61ce93297ef96b36de75a4cf"} Dec 10 11:05:40 crc kubenswrapper[4682]: I1210 11:05:40.461662 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" podStartSLOduration=4.357868727 podStartE2EDuration="42.461638762s" podCreationTimestamp="2025-12-10 11:04:58 +0000 UTC" firstStartedPulling="2025-12-10 11:04:59.248844957 +0000 UTC m=+1179.569055707" lastFinishedPulling="2025-12-10 11:05:37.352614992 +0000 UTC m=+1217.672825742" observedRunningTime="2025-12-10 11:05:40.45050678 +0000 UTC m=+1220.770717550" watchObservedRunningTime="2025-12-10 11:05:40.461638762 +0000 UTC m=+1220.781849512" Dec 10 11:05:41 crc kubenswrapper[4682]: I1210 11:05:41.448200 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0","Type":"ContainerStarted","Data":"262a2eaa41ed02832be58b796ae0f29f7a98d824957a7877a0377701f25f0224"} Dec 10 11:05:41 crc kubenswrapper[4682]: I1210 11:05:41.452772 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-4b2ch" event={"ID":"8ee7ede4-07ea-4b15-88e7-15477c99d5ab","Type":"ContainerStarted","Data":"908d4e737d1ad5f01faa1c41e3621dfffbd357c054f6db29b99e88019c308e8a"} Dec 10 11:05:41 crc kubenswrapper[4682]: I1210 11:05:41.452811 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-4b2ch" 
event={"ID":"8ee7ede4-07ea-4b15-88e7-15477c99d5ab","Type":"ContainerStarted","Data":"fbdce3619d4369c92dba2050795a54b3cb6fbe4e3e32adb1fefdb30eda091619"} Dec 10 11:05:41 crc kubenswrapper[4682]: I1210 11:05:41.452846 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:41 crc kubenswrapper[4682]: I1210 11:05:41.452863 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:05:41 crc kubenswrapper[4682]: I1210 11:05:41.454899 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" event={"ID":"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8","Type":"ContainerStarted","Data":"3471b045b620bb2b34b9814fc9e46ad102fa41d37a049588531b2c75b6396389"} Dec 10 11:05:41 crc kubenswrapper[4682]: I1210 11:05:41.491605 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" podStartSLOduration=-9223371994.363188 podStartE2EDuration="42.491588934s" podCreationTimestamp="2025-12-10 11:04:59 +0000 UTC" firstStartedPulling="2025-12-10 11:05:00.192521366 +0000 UTC m=+1180.512732116" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:05:41.485384207 +0000 UTC m=+1221.805594957" watchObservedRunningTime="2025-12-10 11:05:41.491588934 +0000 UTC m=+1221.811799674" Dec 10 11:05:41 crc kubenswrapper[4682]: I1210 11:05:41.509792 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-4b2ch" podStartSLOduration=24.27233387 podStartE2EDuration="32.5097654s" podCreationTimestamp="2025-12-10 11:05:09 +0000 UTC" firstStartedPulling="2025-12-10 11:05:28.95410265 +0000 UTC m=+1209.274313400" lastFinishedPulling="2025-12-10 11:05:37.19153418 +0000 UTC m=+1217.511744930" observedRunningTime="2025-12-10 11:05:41.505445482 +0000 UTC m=+1221.825656272" watchObservedRunningTime="2025-12-10 11:05:41.5097654 +0000 UTC m=+1221.829976150" Dec 10 11:05:44 crc kubenswrapper[4682]: I1210 11:05:44.606578 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.279741 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.429377 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-lns5b"] Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.429786 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" podUID="5b4961c9-bbce-41a1-815e-9e953082a574" containerName="dnsmasq-dns" containerID="cri-o://9a158f5bf1938daa535c83a2ce93ac138d7659ab4241f120dba9bc815b5b787e" gracePeriod=10 Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.446777 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.527511 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-bkflb"] Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.529579 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.551379 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-bkflb"] Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.624121 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxl2g\" (UniqueName: \"kubernetes.io/projected/edac3774-26dc-4253-8c10-9400ae914cfd-kube-api-access-vxl2g\") pod \"dnsmasq-dns-7cb5889db5-bkflb\" (UID: \"edac3774-26dc-4253-8c10-9400ae914cfd\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.624635 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edac3774-26dc-4253-8c10-9400ae914cfd-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-bkflb\" (UID: \"edac3774-26dc-4253-8c10-9400ae914cfd\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.624840 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edac3774-26dc-4253-8c10-9400ae914cfd-config\") pod \"dnsmasq-dns-7cb5889db5-bkflb\" (UID: \"edac3774-26dc-4253-8c10-9400ae914cfd\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.727508 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edac3774-26dc-4253-8c10-9400ae914cfd-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-bkflb\" (UID: \"edac3774-26dc-4253-8c10-9400ae914cfd\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.727600 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edac3774-26dc-4253-8c10-9400ae914cfd-config\") pod \"dnsmasq-dns-7cb5889db5-bkflb\" (UID: \"edac3774-26dc-4253-8c10-9400ae914cfd\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.727652 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxl2g\" (UniqueName: \"kubernetes.io/projected/edac3774-26dc-4253-8c10-9400ae914cfd-kube-api-access-vxl2g\") pod \"dnsmasq-dns-7cb5889db5-bkflb\" (UID: \"edac3774-26dc-4253-8c10-9400ae914cfd\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.728629 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edac3774-26dc-4253-8c10-9400ae914cfd-config\") pod \"dnsmasq-dns-7cb5889db5-bkflb\" (UID: \"edac3774-26dc-4253-8c10-9400ae914cfd\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.729557 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edac3774-26dc-4253-8c10-9400ae914cfd-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-bkflb\" (UID: \"edac3774-26dc-4253-8c10-9400ae914cfd\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.749167 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxl2g\" (UniqueName: 
\"kubernetes.io/projected/edac3774-26dc-4253-8c10-9400ae914cfd-kube-api-access-vxl2g\") pod \"dnsmasq-dns-7cb5889db5-bkflb\" (UID: \"edac3774-26dc-4253-8c10-9400ae914cfd\") " pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" Dec 10 11:05:45 crc kubenswrapper[4682]: I1210 11:05:45.865156 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.540937 4682 generic.go:334] "Generic (PLEG): container finished" podID="55723944-339e-4ed6-9159-9696ca1debeb" containerID="7f2d34c5d2079a5a8d5e76c77db742611439685d61ce93297ef96b36de75a4cf" exitCode=0 Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.541255 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"55723944-339e-4ed6-9159-9696ca1debeb","Type":"ContainerDied","Data":"7f2d34c5d2079a5a8d5e76c77db742611439685d61ce93297ef96b36de75a4cf"} Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.544774 4682 generic.go:334] "Generic (PLEG): container finished" podID="5b4961c9-bbce-41a1-815e-9e953082a574" containerID="9a158f5bf1938daa535c83a2ce93ac138d7659ab4241f120dba9bc815b5b787e" exitCode=0 Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.544818 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" event={"ID":"5b4961c9-bbce-41a1-815e-9e953082a574","Type":"ContainerDied","Data":"9a158f5bf1938daa535c83a2ce93ac138d7659ab4241f120dba9bc815b5b787e"} Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.578286 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.592581 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.593424 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.595392 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.596856 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-4w6gm" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.597056 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.597058 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.743986 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-ea8336ef-b7d0-491d-ae81-a1f290afeeb1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ea8336ef-b7d0-491d-ae81-a1f290afeeb1\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.744399 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.744455 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhvcb\" (UniqueName: \"kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-kube-api-access-nhvcb\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.744684 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b7a82b72-0262-4a74-becf-36ead02cb92c-lock\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.744756 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b7a82b72-0262-4a74-becf-36ead02cb92c-cache\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.846704 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.846779 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhvcb\" (UniqueName: \"kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-kube-api-access-nhvcb\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.846834 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: 
\"kubernetes.io/empty-dir/b7a82b72-0262-4a74-becf-36ead02cb92c-lock\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.846861 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b7a82b72-0262-4a74-becf-36ead02cb92c-cache\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.846985 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-ea8336ef-b7d0-491d-ae81-a1f290afeeb1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ea8336ef-b7d0-491d-ae81-a1f290afeeb1\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: E1210 11:05:46.847121 4682 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 11:05:46 crc kubenswrapper[4682]: E1210 11:05:46.847138 4682 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 11:05:46 crc kubenswrapper[4682]: E1210 11:05:46.847192 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift podName:b7a82b72-0262-4a74-becf-36ead02cb92c nodeName:}" failed. No retries permitted until 2025-12-10 11:05:47.347167988 +0000 UTC m=+1227.667378738 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift") pod "swift-storage-0" (UID: "b7a82b72-0262-4a74-becf-36ead02cb92c") : configmap "swift-ring-files" not found Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.847801 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b7a82b72-0262-4a74-becf-36ead02cb92c-lock\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.849562 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b7a82b72-0262-4a74-becf-36ead02cb92c-cache\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.850702 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.850740 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-ea8336ef-b7d0-491d-ae81-a1f290afeeb1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ea8336ef-b7d0-491d-ae81-a1f290afeeb1\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/d25aa7f1f520abd74d612392661977f5413efdb187a5cb6e3d9e62292f859fb3/globalmount\"" pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.855738 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.889583 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhvcb\" (UniqueName: \"kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-kube-api-access-nhvcb\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.948707 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b4961c9-bbce-41a1-815e-9e953082a574-config\") pod \"5b4961c9-bbce-41a1-815e-9e953082a574\" (UID: \"5b4961c9-bbce-41a1-815e-9e953082a574\") " Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.948756 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdzg4\" (UniqueName: \"kubernetes.io/projected/5b4961c9-bbce-41a1-815e-9e953082a574-kube-api-access-bdzg4\") pod \"5b4961c9-bbce-41a1-815e-9e953082a574\" (UID: \"5b4961c9-bbce-41a1-815e-9e953082a574\") " Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.948902 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b4961c9-bbce-41a1-815e-9e953082a574-dns-svc\") pod \"5b4961c9-bbce-41a1-815e-9e953082a574\" (UID: \"5b4961c9-bbce-41a1-815e-9e953082a574\") " Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.951508 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-ea8336ef-b7d0-491d-ae81-a1f290afeeb1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ea8336ef-b7d0-491d-ae81-a1f290afeeb1\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:46 crc kubenswrapper[4682]: I1210 11:05:46.971747 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b4961c9-bbce-41a1-815e-9e953082a574-kube-api-access-bdzg4" (OuterVolumeSpecName: "kube-api-access-bdzg4") pod "5b4961c9-bbce-41a1-815e-9e953082a574" (UID: "5b4961c9-bbce-41a1-815e-9e953082a574"). InnerVolumeSpecName "kube-api-access-bdzg4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.004064 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b4961c9-bbce-41a1-815e-9e953082a574-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5b4961c9-bbce-41a1-815e-9e953082a574" (UID: "5b4961c9-bbce-41a1-815e-9e953082a574"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.019220 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b4961c9-bbce-41a1-815e-9e953082a574-config" (OuterVolumeSpecName: "config") pod "5b4961c9-bbce-41a1-815e-9e953082a574" (UID: "5b4961c9-bbce-41a1-815e-9e953082a574"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.051605 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b4961c9-bbce-41a1-815e-9e953082a574-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.051631 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdzg4\" (UniqueName: \"kubernetes.io/projected/5b4961c9-bbce-41a1-815e-9e953082a574-kube-api-access-bdzg4\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.051641 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b4961c9-bbce-41a1-815e-9e953082a574-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.232519 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-n9b92"] Dec 10 11:05:47 crc kubenswrapper[4682]: E1210 11:05:47.232923 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b4961c9-bbce-41a1-815e-9e953082a574" containerName="init" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.232948 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b4961c9-bbce-41a1-815e-9e953082a574" containerName="init" Dec 10 11:05:47 crc kubenswrapper[4682]: E1210 11:05:47.232963 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b4961c9-bbce-41a1-815e-9e953082a574" containerName="dnsmasq-dns" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.232971 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b4961c9-bbce-41a1-815e-9e953082a574" containerName="dnsmasq-dns" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.233171 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b4961c9-bbce-41a1-815e-9e953082a574" containerName="dnsmasq-dns" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.233941 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.241947 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.242207 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.242550 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.251883 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-n9b92"] Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.309679 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-bkflb"] Dec 10 11:05:47 crc kubenswrapper[4682]: W1210 11:05:47.321841 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podedac3774_26dc_4253_8c10_9400ae914cfd.slice/crio-8f0ad6eb6c43a2590152dc0d0fd36d7122927bf1dc9dd7acf73ae70f71b50c9e WatchSource:0}: Error finding container 8f0ad6eb6c43a2590152dc0d0fd36d7122927bf1dc9dd7acf73ae70f71b50c9e: Status 404 returned error can't find the container with id 8f0ad6eb6c43a2590152dc0d0fd36d7122927bf1dc9dd7acf73ae70f71b50c9e Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.355499 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-combined-ca-bundle\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.355571 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.355670 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhfwg\" (UniqueName: \"kubernetes.io/projected/4c235968-0ec4-4c4f-98c4-6b19fa58e826-kube-api-access-jhfwg\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.355707 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-swiftconf\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.355788 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c235968-0ec4-4c4f-98c4-6b19fa58e826-scripts\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.355827 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/4c235968-0ec4-4c4f-98c4-6b19fa58e826-ring-data-devices\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.355866 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/4c235968-0ec4-4c4f-98c4-6b19fa58e826-etc-swift\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: E1210 11:05:47.355911 4682 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 11:05:47 crc kubenswrapper[4682]: E1210 11:05:47.355931 4682 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.355951 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-dispersionconf\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: E1210 11:05:47.355976 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift podName:b7a82b72-0262-4a74-becf-36ead02cb92c nodeName:}" failed. No retries permitted until 2025-12-10 11:05:48.355958773 +0000 UTC m=+1228.676169613 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift") pod "swift-storage-0" (UID: "b7a82b72-0262-4a74-becf-36ead02cb92c") : configmap "swift-ring-files" not found Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.457933 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c235968-0ec4-4c4f-98c4-6b19fa58e826-scripts\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.458207 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/4c235968-0ec4-4c4f-98c4-6b19fa58e826-ring-data-devices\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.458339 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/4c235968-0ec4-4c4f-98c4-6b19fa58e826-etc-swift\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.458438 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-dispersionconf\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.458656 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-combined-ca-bundle\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.458860 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhfwg\" (UniqueName: \"kubernetes.io/projected/4c235968-0ec4-4c4f-98c4-6b19fa58e826-kube-api-access-jhfwg\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.458970 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-swiftconf\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.459488 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/4c235968-0ec4-4c4f-98c4-6b19fa58e826-etc-swift\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.458895 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c235968-0ec4-4c4f-98c4-6b19fa58e826-scripts\") pod 
\"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.459633 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/4c235968-0ec4-4c4f-98c4-6b19fa58e826-ring-data-devices\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.464856 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-dispersionconf\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.465106 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-swiftconf\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.465199 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-combined-ca-bundle\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.476014 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhfwg\" (UniqueName: \"kubernetes.io/projected/4c235968-0ec4-4c4f-98c4-6b19fa58e826-kube-api-access-jhfwg\") pod \"swift-ring-rebalance-n9b92\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.555137 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1b6b3db6-e7bd-4c87-a35a-1f398c40436e","Type":"ContainerStarted","Data":"806a8f67a2dfd673598b9e90f44cd94a6f18b32f69116267e9cd42e6881f653c"} Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.558454 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d9f85710-54c3-4f30-88f6-bb97f9a200e8","Type":"ContainerStarted","Data":"7725213501aff75072815646c590610afda1a0996cdcd576d3d522ef3098a7ca"} Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.562039 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" event={"ID":"edac3774-26dc-4253-8c10-9400ae914cfd","Type":"ContainerStarted","Data":"8f0ad6eb6c43a2590152dc0d0fd36d7122927bf1dc9dd7acf73ae70f71b50c9e"} Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.564349 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" event={"ID":"5b4961c9-bbce-41a1-815e-9e953082a574","Type":"ContainerDied","Data":"bc2e34fdb268718c57a5e4f1a6b03a2d2bc2c307e0c2719845c142a9d484bd1e"} Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.564391 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-lns5b" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.564418 4682 scope.go:117] "RemoveContainer" containerID="9a158f5bf1938daa535c83a2ce93ac138d7659ab4241f120dba9bc815b5b787e" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.566869 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.571308 4682 generic.go:334] "Generic (PLEG): container finished" podID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerID="262a2eaa41ed02832be58b796ae0f29f7a98d824957a7877a0377701f25f0224" exitCode=0 Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.571390 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0","Type":"ContainerDied","Data":"262a2eaa41ed02832be58b796ae0f29f7a98d824957a7877a0377701f25f0224"} Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.574432 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" event={"ID":"74c8133d-aa41-4891-8a66-fafa28cfd141","Type":"ContainerStarted","Data":"8d06ea4c0c6e56d9998ee51f8114cf0d298ed79d60c83cae719f84a58029a493"} Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.574618 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.579088 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"13b6a06f-420a-420d-8a7c-5a80d312ec79","Type":"ContainerStarted","Data":"2072cbfd7929a7cfb472429a8307ec76263d4703507dda33b798e481d6426571"} Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.609876 4682 scope.go:117] "RemoveContainer" containerID="971bd5f6a593f1bd2bf06d169c545e8e8812c95d5249a092d7ce9755f6c61c16" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.655018 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=18.049225266 podStartE2EDuration="35.654999985s" podCreationTimestamp="2025-12-10 11:05:12 +0000 UTC" firstStartedPulling="2025-12-10 11:05:29.273053842 +0000 UTC m=+1209.593264592" lastFinishedPulling="2025-12-10 11:05:46.878828561 +0000 UTC m=+1227.199039311" observedRunningTime="2025-12-10 11:05:47.645673279 +0000 UTC m=+1227.965884039" watchObservedRunningTime="2025-12-10 11:05:47.654999985 +0000 UTC m=+1227.975210735" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.744447 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" podStartSLOduration=-9223372005.110352 podStartE2EDuration="31.744423867s" podCreationTimestamp="2025-12-10 11:05:16 +0000 UTC" firstStartedPulling="2025-12-10 11:05:29.550093206 +0000 UTC m=+1209.870303966" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:05:47.681540245 +0000 UTC m=+1228.001750985" watchObservedRunningTime="2025-12-10 11:05:47.744423867 +0000 UTC m=+1228.064634637" Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.786179 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-lns5b"] Dec 10 11:05:47 crc kubenswrapper[4682]: I1210 11:05:47.819416 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/dnsmasq-dns-78dd6ddcc-lns5b"] Dec 10 11:05:48 crc kubenswrapper[4682]: I1210 11:05:48.088179 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-n9b92"] Dec 10 11:05:48 crc kubenswrapper[4682]: I1210 11:05:48.407221 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:48 crc kubenswrapper[4682]: E1210 11:05:48.407832 4682 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 11:05:48 crc kubenswrapper[4682]: E1210 11:05:48.407851 4682 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 11:05:48 crc kubenswrapper[4682]: E1210 11:05:48.407901 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift podName:b7a82b72-0262-4a74-becf-36ead02cb92c nodeName:}" failed. No retries permitted until 2025-12-10 11:05:50.407881521 +0000 UTC m=+1230.728092281 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift") pod "swift-storage-0" (UID: "b7a82b72-0262-4a74-becf-36ead02cb92c") : configmap "swift-ring-files" not found Dec 10 11:05:48 crc kubenswrapper[4682]: I1210 11:05:48.408113 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b4961c9-bbce-41a1-815e-9e953082a574" path="/var/lib/kubelet/pods/5b4961c9-bbce-41a1-815e-9e953082a574/volumes" Dec 10 11:05:48 crc kubenswrapper[4682]: I1210 11:05:48.591697 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"97a3c791-f1b7-4665-ae8b-fa87d1ee73e1","Type":"ContainerStarted","Data":"cf74d222118ad780f284707ff4eada97610a14e52ed22b09dd4bff8b526d08ee"} Dec 10 11:05:48 crc kubenswrapper[4682]: I1210 11:05:48.594407 4682 generic.go:334] "Generic (PLEG): container finished" podID="edac3774-26dc-4253-8c10-9400ae914cfd" containerID="dd363c2b9d3c8b4c6bb8b4f9e0eced16d907f0ef190d04fe053e0a521fd7848a" exitCode=0 Dec 10 11:05:48 crc kubenswrapper[4682]: I1210 11:05:48.594481 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" event={"ID":"edac3774-26dc-4253-8c10-9400ae914cfd","Type":"ContainerDied","Data":"dd363c2b9d3c8b4c6bb8b4f9e0eced16d907f0ef190d04fe053e0a521fd7848a"} Dec 10 11:05:48 crc kubenswrapper[4682]: I1210 11:05:48.616747 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=21.813786901 podStartE2EDuration="39.616729505s" podCreationTimestamp="2025-12-10 11:05:09 +0000 UTC" firstStartedPulling="2025-12-10 11:05:29.578029761 +0000 UTC m=+1209.898240511" lastFinishedPulling="2025-12-10 11:05:47.380972365 +0000 UTC m=+1227.701183115" observedRunningTime="2025-12-10 11:05:48.613005527 +0000 UTC m=+1228.933216277" watchObservedRunningTime="2025-12-10 11:05:48.616729505 +0000 UTC m=+1228.936940255" Dec 10 11:05:48 crc kubenswrapper[4682]: I1210 11:05:48.618573 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:49 crc kubenswrapper[4682]: W1210 11:05:49.014003 4682 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c235968_0ec4_4c4f_98c4_6b19fa58e826.slice/crio-50979db329d65936b92afb631b2e06e6922fab5713756f9eb6477cecd477441e WatchSource:0}: Error finding container 50979db329d65936b92afb631b2e06e6922fab5713756f9eb6477cecd477441e: Status 404 returned error can't find the container with id 50979db329d65936b92afb631b2e06e6922fab5713756f9eb6477cecd477441e Dec 10 11:05:49 crc kubenswrapper[4682]: I1210 11:05:49.605308 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-n9b92" event={"ID":"4c235968-0ec4-4c4f-98c4-6b19fa58e826","Type":"ContainerStarted","Data":"50979db329d65936b92afb631b2e06e6922fab5713756f9eb6477cecd477441e"} Dec 10 11:05:49 crc kubenswrapper[4682]: I1210 11:05:49.608513 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:05:49 crc kubenswrapper[4682]: I1210 11:05:49.618627 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:49 crc kubenswrapper[4682]: I1210 11:05:49.669692 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:50 crc kubenswrapper[4682]: I1210 11:05:50.324554 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:50 crc kubenswrapper[4682]: I1210 11:05:50.365525 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:50 crc kubenswrapper[4682]: I1210 11:05:50.450095 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:50 crc kubenswrapper[4682]: E1210 11:05:50.450410 4682 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 11:05:50 crc kubenswrapper[4682]: E1210 11:05:50.450458 4682 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 11:05:50 crc kubenswrapper[4682]: E1210 11:05:50.450554 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift podName:b7a82b72-0262-4a74-becf-36ead02cb92c nodeName:}" failed. No retries permitted until 2025-12-10 11:05:54.450529726 +0000 UTC m=+1234.770740556 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift") pod "swift-storage-0" (UID: "b7a82b72-0262-4a74-becf-36ead02cb92c") : configmap "swift-ring-files" not found Dec 10 11:05:50 crc kubenswrapper[4682]: I1210 11:05:50.619002 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" event={"ID":"edac3774-26dc-4253-8c10-9400ae914cfd","Type":"ContainerStarted","Data":"ec4632f76979ff8110dc89abd314ffd8e2bd72ece4b909eca510d4c11193087d"} Dec 10 11:05:50 crc kubenswrapper[4682]: I1210 11:05:50.619656 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:50 crc kubenswrapper[4682]: I1210 11:05:50.619683 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" Dec 10 11:05:50 crc kubenswrapper[4682]: I1210 11:05:50.638379 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" podStartSLOduration=5.638360285 podStartE2EDuration="5.638360285s" podCreationTimestamp="2025-12-10 11:05:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:05:50.633975907 +0000 UTC m=+1230.954186657" watchObservedRunningTime="2025-12-10 11:05:50.638360285 +0000 UTC m=+1230.958571035" Dec 10 11:05:50 crc kubenswrapper[4682]: I1210 11:05:50.668235 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Dec 10 11:05:50 crc kubenswrapper[4682]: I1210 11:05:50.673890 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Dec 10 11:05:50 crc kubenswrapper[4682]: I1210 11:05:50.951799 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-bkflb"] Dec 10 11:05:50 crc kubenswrapper[4682]: I1210 11:05:50.991321 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d65f699f-wqf8l"] Dec 10 11:05:50 crc kubenswrapper[4682]: I1210 11:05:50.993894 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:50 crc kubenswrapper[4682]: I1210 11:05:50.998667 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.000155 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d65f699f-wqf8l"] Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.062305 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-ovsdbserver-nb\") pod \"dnsmasq-dns-57d65f699f-wqf8l\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.062377 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9lz6\" (UniqueName: \"kubernetes.io/projected/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-kube-api-access-s9lz6\") pod \"dnsmasq-dns-57d65f699f-wqf8l\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.062484 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-config\") pod \"dnsmasq-dns-57d65f699f-wqf8l\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.062516 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-dns-svc\") pod \"dnsmasq-dns-57d65f699f-wqf8l\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.143859 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.145955 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.151407 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.151628 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.151738 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.151952 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-j2zp4" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.164447 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d65f699f-wqf8l"] Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.164707 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.164807 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjl6j\" (UniqueName: \"kubernetes.io/projected/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-kube-api-access-sjl6j\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.164849 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-ovsdbserver-nb\") pod \"dnsmasq-dns-57d65f699f-wqf8l\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.164905 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9lz6\" (UniqueName: \"kubernetes.io/projected/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-kube-api-access-s9lz6\") pod \"dnsmasq-dns-57d65f699f-wqf8l\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.164959 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-scripts\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.165001 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.165039 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-config\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 
11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.165073 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: E1210 11:05:51.165090 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc kube-api-access-s9lz6 ovsdbserver-nb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" podUID="e5d5dc25-c180-48ab-bcff-20a5f6cc7773" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.165100 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-config\") pod \"dnsmasq-dns-57d65f699f-wqf8l\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.165169 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-dns-svc\") pod \"dnsmasq-dns-57d65f699f-wqf8l\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.165211 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.165978 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-dns-svc\") pod \"dnsmasq-dns-57d65f699f-wqf8l\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.166029 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-config\") pod \"dnsmasq-dns-57d65f699f-wqf8l\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.166196 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-ovsdbserver-nb\") pod \"dnsmasq-dns-57d65f699f-wqf8l\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.173326 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.258317 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-clb7f"] Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.262202 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.269131 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.269289 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9lz6\" (UniqueName: \"kubernetes.io/projected/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-kube-api-access-s9lz6\") pod \"dnsmasq-dns-57d65f699f-wqf8l\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.269686 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-clb7f"] Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.275266 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjl6j\" (UniqueName: \"kubernetes.io/projected/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-kube-api-access-sjl6j\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.275744 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-scripts\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.275847 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.275951 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-config\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.276044 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.276196 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.276392 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.281411 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-ovn-rundir\") pod \"ovn-northd-0\" 
(UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.284271 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-scripts\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.284773 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.292703 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-config\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.292714 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.293622 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.298750 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-w9qrd"] Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.299995 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.305660 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-w9qrd"] Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.305860 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.310856 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjl6j\" (UniqueName: \"kubernetes.io/projected/88e4bd3e-e940-489b-9d88-d40fd96bf0cd-kube-api-access-sjl6j\") pod \"ovn-northd-0\" (UID: \"88e4bd3e-e940-489b-9d88-d40fd96bf0cd\") " pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.377875 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-config\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.377937 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8p78\" (UniqueName: \"kubernetes.io/projected/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-kube-api-access-z8p78\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.377975 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.378007 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.378026 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-combined-ca-bundle\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.378057 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-ovn-rundir\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.378087 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: 
\"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.378114 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgkfc\" (UniqueName: \"kubernetes.io/projected/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-kube-api-access-fgkfc\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.378138 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-config\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.378158 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-ovs-rundir\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.378236 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.479555 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.480643 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-config\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.480698 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8p78\" (UniqueName: \"kubernetes.io/projected/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-kube-api-access-z8p78\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.480738 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.480762 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-metrics-certs-tls-certs\") pod 
\"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.480782 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-combined-ca-bundle\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.480829 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-ovn-rundir\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.480882 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.480927 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgkfc\" (UniqueName: \"kubernetes.io/projected/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-kube-api-access-fgkfc\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.480974 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-config\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.481003 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-ovs-rundir\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.481350 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-ovs-rundir\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.480546 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.482285 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-config\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 
11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.483234 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.484420 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.485053 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-ovn-rundir\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.485758 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-config\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.487857 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-combined-ca-bundle\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.521077 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.525590 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgkfc\" (UniqueName: \"kubernetes.io/projected/26b1eabc-8b9f-4f9d-99ba-8c79f047e55e-kube-api-access-fgkfc\") pod \"ovn-controller-metrics-w9qrd\" (UID: \"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e\") " pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.534491 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8p78\" (UniqueName: \"kubernetes.io/projected/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-kube-api-access-z8p78\") pod \"dnsmasq-dns-b8fbc5445-clb7f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.539906 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.635202 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.656835 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.663930 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.668165 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-w9qrd" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.684006 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-ovsdbserver-nb\") pod \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.684111 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-dns-svc\") pod \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.684171 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-config\") pod \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.684270 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9lz6\" (UniqueName: \"kubernetes.io/projected/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-kube-api-access-s9lz6\") pod \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\" (UID: \"e5d5dc25-c180-48ab-bcff-20a5f6cc7773\") " Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.685552 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-config" (OuterVolumeSpecName: "config") pod "e5d5dc25-c180-48ab-bcff-20a5f6cc7773" (UID: "e5d5dc25-c180-48ab-bcff-20a5f6cc7773"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.686200 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e5d5dc25-c180-48ab-bcff-20a5f6cc7773" (UID: "e5d5dc25-c180-48ab-bcff-20a5f6cc7773"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.686377 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e5d5dc25-c180-48ab-bcff-20a5f6cc7773" (UID: "e5d5dc25-c180-48ab-bcff-20a5f6cc7773"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.688819 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-kube-api-access-s9lz6" (OuterVolumeSpecName: "kube-api-access-s9lz6") pod "e5d5dc25-c180-48ab-bcff-20a5f6cc7773" (UID: "e5d5dc25-c180-48ab-bcff-20a5f6cc7773"). InnerVolumeSpecName "kube-api-access-s9lz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.791870 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9lz6\" (UniqueName: \"kubernetes.io/projected/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-kube-api-access-s9lz6\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.791914 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.791931 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:51 crc kubenswrapper[4682]: I1210 11:05:51.791942 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5d5dc25-c180-48ab-bcff-20a5f6cc7773-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:52 crc kubenswrapper[4682]: I1210 11:05:52.642499 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" podUID="edac3774-26dc-4253-8c10-9400ae914cfd" containerName="dnsmasq-dns" containerID="cri-o://ec4632f76979ff8110dc89abd314ffd8e2bd72ece4b909eca510d4c11193087d" gracePeriod=10 Dec 10 11:05:52 crc kubenswrapper[4682]: I1210 11:05:52.643972 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d65f699f-wqf8l" Dec 10 11:05:52 crc kubenswrapper[4682]: I1210 11:05:52.702950 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d65f699f-wqf8l"] Dec 10 11:05:52 crc kubenswrapper[4682]: I1210 11:05:52.712116 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d65f699f-wqf8l"] Dec 10 11:05:53 crc kubenswrapper[4682]: I1210 11:05:53.652969 4682 generic.go:334] "Generic (PLEG): container finished" podID="1b6b3db6-e7bd-4c87-a35a-1f398c40436e" containerID="806a8f67a2dfd673598b9e90f44cd94a6f18b32f69116267e9cd42e6881f653c" exitCode=0 Dec 10 11:05:53 crc kubenswrapper[4682]: I1210 11:05:53.653440 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1b6b3db6-e7bd-4c87-a35a-1f398c40436e","Type":"ContainerDied","Data":"806a8f67a2dfd673598b9e90f44cd94a6f18b32f69116267e9cd42e6881f653c"} Dec 10 11:05:53 crc kubenswrapper[4682]: I1210 11:05:53.657041 4682 generic.go:334] "Generic (PLEG): container finished" podID="d9f85710-54c3-4f30-88f6-bb97f9a200e8" containerID="7725213501aff75072815646c590610afda1a0996cdcd576d3d522ef3098a7ca" exitCode=0 Dec 10 11:05:53 crc kubenswrapper[4682]: I1210 11:05:53.657103 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d9f85710-54c3-4f30-88f6-bb97f9a200e8","Type":"ContainerDied","Data":"7725213501aff75072815646c590610afda1a0996cdcd576d3d522ef3098a7ca"} Dec 10 11:05:53 crc kubenswrapper[4682]: I1210 11:05:53.660555 4682 generic.go:334] "Generic (PLEG): container finished" podID="edac3774-26dc-4253-8c10-9400ae914cfd" containerID="ec4632f76979ff8110dc89abd314ffd8e2bd72ece4b909eca510d4c11193087d" exitCode=0 Dec 10 11:05:53 crc kubenswrapper[4682]: I1210 11:05:53.660586 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" 
event={"ID":"edac3774-26dc-4253-8c10-9400ae914cfd","Type":"ContainerDied","Data":"ec4632f76979ff8110dc89abd314ffd8e2bd72ece4b909eca510d4c11193087d"} Dec 10 11:05:53 crc kubenswrapper[4682]: I1210 11:05:53.712298 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-w9qrd"] Dec 10 11:05:53 crc kubenswrapper[4682]: I1210 11:05:53.728670 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-clb7f"] Dec 10 11:05:54 crc kubenswrapper[4682]: I1210 11:05:54.393309 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5d5dc25-c180-48ab-bcff-20a5f6cc7773" path="/var/lib/kubelet/pods/e5d5dc25-c180-48ab-bcff-20a5f6cc7773/volumes" Dec 10 11:05:54 crc kubenswrapper[4682]: I1210 11:05:54.547491 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:05:54 crc kubenswrapper[4682]: E1210 11:05:54.548192 4682 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 11:05:54 crc kubenswrapper[4682]: E1210 11:05:54.548239 4682 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 11:05:54 crc kubenswrapper[4682]: E1210 11:05:54.548304 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift podName:b7a82b72-0262-4a74-becf-36ead02cb92c nodeName:}" failed. No retries permitted until 2025-12-10 11:06:02.548280919 +0000 UTC m=+1242.868491689 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift") pod "swift-storage-0" (UID: "b7a82b72-0262-4a74-becf-36ead02cb92c") : configmap "swift-ring-files" not found Dec 10 11:05:54 crc kubenswrapper[4682]: I1210 11:05:54.811269 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" Dec 10 11:05:54 crc kubenswrapper[4682]: I1210 11:05:54.954146 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edac3774-26dc-4253-8c10-9400ae914cfd-dns-svc\") pod \"edac3774-26dc-4253-8c10-9400ae914cfd\" (UID: \"edac3774-26dc-4253-8c10-9400ae914cfd\") " Dec 10 11:05:54 crc kubenswrapper[4682]: I1210 11:05:54.954369 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxl2g\" (UniqueName: \"kubernetes.io/projected/edac3774-26dc-4253-8c10-9400ae914cfd-kube-api-access-vxl2g\") pod \"edac3774-26dc-4253-8c10-9400ae914cfd\" (UID: \"edac3774-26dc-4253-8c10-9400ae914cfd\") " Dec 10 11:05:54 crc kubenswrapper[4682]: I1210 11:05:54.954423 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edac3774-26dc-4253-8c10-9400ae914cfd-config\") pod \"edac3774-26dc-4253-8c10-9400ae914cfd\" (UID: \"edac3774-26dc-4253-8c10-9400ae914cfd\") " Dec 10 11:05:54 crc kubenswrapper[4682]: I1210 11:05:54.957904 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edac3774-26dc-4253-8c10-9400ae914cfd-kube-api-access-vxl2g" (OuterVolumeSpecName: "kube-api-access-vxl2g") pod "edac3774-26dc-4253-8c10-9400ae914cfd" (UID: "edac3774-26dc-4253-8c10-9400ae914cfd"). InnerVolumeSpecName "kube-api-access-vxl2g". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:05:54 crc kubenswrapper[4682]: I1210 11:05:54.992789 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edac3774-26dc-4253-8c10-9400ae914cfd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "edac3774-26dc-4253-8c10-9400ae914cfd" (UID: "edac3774-26dc-4253-8c10-9400ae914cfd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:05:55 crc kubenswrapper[4682]: I1210 11:05:55.001953 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edac3774-26dc-4253-8c10-9400ae914cfd-config" (OuterVolumeSpecName: "config") pod "edac3774-26dc-4253-8c10-9400ae914cfd" (UID: "edac3774-26dc-4253-8c10-9400ae914cfd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:05:55 crc kubenswrapper[4682]: I1210 11:05:55.057069 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edac3774-26dc-4253-8c10-9400ae914cfd-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:55 crc kubenswrapper[4682]: I1210 11:05:55.057113 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxl2g\" (UniqueName: \"kubernetes.io/projected/edac3774-26dc-4253-8c10-9400ae914cfd-kube-api-access-vxl2g\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:55 crc kubenswrapper[4682]: I1210 11:05:55.057126 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edac3774-26dc-4253-8c10-9400ae914cfd-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:05:55 crc kubenswrapper[4682]: I1210 11:05:55.089433 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 10 11:05:55 crc kubenswrapper[4682]: I1210 11:05:55.683007 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-w9qrd" event={"ID":"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e","Type":"ContainerStarted","Data":"a719c08387e8f0bd1c845d783a50c7e2acdadd4fa8c1f40aa79214499b3f0e16"} Dec 10 11:05:55 crc kubenswrapper[4682]: I1210 11:05:55.684541 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" event={"ID":"edac3774-26dc-4253-8c10-9400ae914cfd","Type":"ContainerDied","Data":"8f0ad6eb6c43a2590152dc0d0fd36d7122927bf1dc9dd7acf73ae70f71b50c9e"} Dec 10 11:05:55 crc kubenswrapper[4682]: I1210 11:05:55.684572 4682 scope.go:117] "RemoveContainer" containerID="ec4632f76979ff8110dc89abd314ffd8e2bd72ece4b909eca510d4c11193087d" Dec 10 11:05:55 crc kubenswrapper[4682]: I1210 11:05:55.684673 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-bkflb" Dec 10 11:05:55 crc kubenswrapper[4682]: I1210 11:05:55.689605 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" event={"ID":"92ecd66f-e6fa-4be2-b61c-38fa89fb015f","Type":"ContainerStarted","Data":"ba33ac53f73497edb022300f889eb8515f9a17e62005725ab4a82c7258e57554"} Dec 10 11:05:55 crc kubenswrapper[4682]: I1210 11:05:55.723069 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-bkflb"] Dec 10 11:05:55 crc kubenswrapper[4682]: I1210 11:05:55.731527 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-bkflb"] Dec 10 11:05:56 crc kubenswrapper[4682]: I1210 11:05:56.391668 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edac3774-26dc-4253-8c10-9400ae914cfd" path="/var/lib/kubelet/pods/edac3774-26dc-4253-8c10-9400ae914cfd/volumes" Dec 10 11:05:56 crc kubenswrapper[4682]: I1210 11:05:56.701724 4682 scope.go:117] "RemoveContainer" containerID="dd363c2b9d3c8b4c6bb8b4f9e0eced16d907f0ef190d04fe053e0a521fd7848a" Dec 10 11:05:56 crc kubenswrapper[4682]: I1210 11:05:56.710785 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"88e4bd3e-e940-489b-9d88-d40fd96bf0cd","Type":"ContainerStarted","Data":"332f6f073d6b797b800eb0c6bc78b8abec578c3a2e6d1745f8d6a061a2e48da4"} Dec 10 11:05:57 crc kubenswrapper[4682]: I1210 11:05:57.485953 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-querier-5467947bf7-wwt8c" Dec 10 11:05:57 crc kubenswrapper[4682]: I1210 11:05:57.569182 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz" Dec 10 11:05:57 crc kubenswrapper[4682]: I1210 11:05:57.814899 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1b6b3db6-e7bd-4c87-a35a-1f398c40436e","Type":"ContainerStarted","Data":"dddbe8acc40eb09377849693896134607abc737f33867b455f641bcd33d1dda1"} Dec 10 11:05:57 crc kubenswrapper[4682]: I1210 11:05:57.828790 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d9f85710-54c3-4f30-88f6-bb97f9a200e8","Type":"ContainerStarted","Data":"0cf1c3ed7c7e9c5b554a6925f8aa30972ade0a23f2ca78667f093da46a5e41f2"} Dec 10 11:05:57 crc kubenswrapper[4682]: I1210 11:05:57.835571 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-n9b92" event={"ID":"4c235968-0ec4-4c4f-98c4-6b19fa58e826","Type":"ContainerStarted","Data":"f822f7223cc4082a8186f6fa10ee5509eb93a60c5d5dc5b1e5d37bd5df6fb85a"} Dec 10 11:05:57 crc kubenswrapper[4682]: I1210 11:05:57.845118 4682 generic.go:334] "Generic (PLEG): container finished" podID="92ecd66f-e6fa-4be2-b61c-38fa89fb015f" containerID="58543ad41402e0ea26cbd88eae5fe41e99b76bf8f65a97dd2ec057a35488b6b5" exitCode=0 Dec 10 11:05:57 crc kubenswrapper[4682]: I1210 11:05:57.845190 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" event={"ID":"92ecd66f-e6fa-4be2-b61c-38fa89fb015f","Type":"ContainerDied","Data":"58543ad41402e0ea26cbd88eae5fe41e99b76bf8f65a97dd2ec057a35488b6b5"} Dec 10 11:05:57 crc kubenswrapper[4682]: I1210 11:05:57.866311 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=14.054970284 
podStartE2EDuration="57.866288421s" podCreationTimestamp="2025-12-10 11:05:00 +0000 UTC" firstStartedPulling="2025-12-10 11:05:02.940715758 +0000 UTC m=+1183.260926508" lastFinishedPulling="2025-12-10 11:05:46.752033875 +0000 UTC m=+1227.072244645" observedRunningTime="2025-12-10 11:05:57.83881393 +0000 UTC m=+1238.159024680" watchObservedRunningTime="2025-12-10 11:05:57.866288421 +0000 UTC m=+1238.186499171" Dec 10 11:05:57 crc kubenswrapper[4682]: I1210 11:05:57.869882 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-n9b92" podStartSLOduration=3.124742261 podStartE2EDuration="10.869872864s" podCreationTimestamp="2025-12-10 11:05:47 +0000 UTC" firstStartedPulling="2025-12-10 11:05:49.015958029 +0000 UTC m=+1229.336168779" lastFinishedPulling="2025-12-10 11:05:56.761088632 +0000 UTC m=+1237.081299382" observedRunningTime="2025-12-10 11:05:57.85478044 +0000 UTC m=+1238.174991210" watchObservedRunningTime="2025-12-10 11:05:57.869872864 +0000 UTC m=+1238.190083614" Dec 10 11:05:57 crc kubenswrapper[4682]: I1210 11:05:57.871981 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0","Type":"ContainerStarted","Data":"a558d36e3f414458f9ccf78c0375238c90ad6b90a8d9d3ab2fceebae9e3999d6"} Dec 10 11:05:57 crc kubenswrapper[4682]: I1210 11:05:57.878915 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"55723944-339e-4ed6-9159-9696ca1debeb","Type":"ContainerStarted","Data":"4ed6f4312759b3c15793ffb1cb698309cd163d824a8da90ba907ef807aff6ad1"} Dec 10 11:05:57 crc kubenswrapper[4682]: I1210 11:05:57.891509 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-w9qrd" event={"ID":"26b1eabc-8b9f-4f9d-99ba-8c79f047e55e","Type":"ContainerStarted","Data":"246768f20e2bbeb717381cda17061cb9af9010691e66ad2f89565dd68c040a30"} Dec 10 11:05:57 crc kubenswrapper[4682]: I1210 11:05:57.899041 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=14.151934415 podStartE2EDuration="56.899019716s" podCreationTimestamp="2025-12-10 11:05:01 +0000 UTC" firstStartedPulling="2025-12-10 11:05:04.133997182 +0000 UTC m=+1184.454207932" lastFinishedPulling="2025-12-10 11:05:46.881082483 +0000 UTC m=+1227.201293233" observedRunningTime="2025-12-10 11:05:57.893767222 +0000 UTC m=+1238.213977972" watchObservedRunningTime="2025-12-10 11:05:57.899019716 +0000 UTC m=+1238.219230466" Dec 10 11:05:57 crc kubenswrapper[4682]: I1210 11:05:57.921047 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-w9qrd" podStartSLOduration=6.920395017 podStartE2EDuration="6.920395017s" podCreationTimestamp="2025-12-10 11:05:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:05:57.912278362 +0000 UTC m=+1238.232489122" watchObservedRunningTime="2025-12-10 11:05:57.920395017 +0000 UTC m=+1238.240605767" Dec 10 11:05:58 crc kubenswrapper[4682]: I1210 11:05:58.493690 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cloudkitty-lokistack-ingester-0" podUID="ea1f94a0-5b00-4aac-85ae-f7af9df196b6" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 10 11:05:58 crc kubenswrapper[4682]: I1210 11:05:58.518011 
4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-compactor-0" Dec 10 11:05:58 crc kubenswrapper[4682]: I1210 11:05:58.633809 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-index-gateway-0" Dec 10 11:05:58 crc kubenswrapper[4682]: I1210 11:05:58.901891 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"88e4bd3e-e940-489b-9d88-d40fd96bf0cd","Type":"ContainerStarted","Data":"454db107a04f8d18fa3c21eb1535e51b91dd23c9076512ad9097b20ff2596656"} Dec 10 11:05:58 crc kubenswrapper[4682]: I1210 11:05:58.904636 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" event={"ID":"92ecd66f-e6fa-4be2-b61c-38fa89fb015f","Type":"ContainerStarted","Data":"c742871a2c0d5f66fbc3ab0a2aef5b3c2eefd2ab03c4b9dea0a1dd5f2d9fe256"} Dec 10 11:05:58 crc kubenswrapper[4682]: I1210 11:05:58.905021 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:05:58 crc kubenswrapper[4682]: I1210 11:05:58.928727 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" podStartSLOduration=7.9287085699999995 podStartE2EDuration="7.92870857s" podCreationTimestamp="2025-12-10 11:05:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:05:58.926175141 +0000 UTC m=+1239.246385891" watchObservedRunningTime="2025-12-10 11:05:58.92870857 +0000 UTC m=+1239.248919320" Dec 10 11:05:59 crc kubenswrapper[4682]: I1210 11:05:59.918839 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"88e4bd3e-e940-489b-9d88-d40fd96bf0cd","Type":"ContainerStarted","Data":"86feff119358f310c14d8c5b0ce917c66bc4b0fe2184c051c1edb1e6f753fb4c"} Dec 10 11:05:59 crc kubenswrapper[4682]: I1210 11:05:59.919305 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Dec 10 11:05:59 crc kubenswrapper[4682]: I1210 11:05:59.943643 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=7.257837609 podStartE2EDuration="8.943624339s" podCreationTimestamp="2025-12-10 11:05:51 +0000 UTC" firstStartedPulling="2025-12-10 11:05:56.700888506 +0000 UTC m=+1237.021099256" lastFinishedPulling="2025-12-10 11:05:58.386675246 +0000 UTC m=+1238.706885986" observedRunningTime="2025-12-10 11:05:59.935513766 +0000 UTC m=+1240.255724516" watchObservedRunningTime="2025-12-10 11:05:59.943624339 +0000 UTC m=+1240.263835089" Dec 10 11:06:01 crc kubenswrapper[4682]: I1210 11:06:01.153143 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"55723944-339e-4ed6-9159-9696ca1debeb","Type":"ContainerStarted","Data":"a40e355724f9c9871339c55f2ecfd150cb383f20f88523dc9f6c3c76b4581b7f"} Dec 10 11:06:01 crc kubenswrapper[4682]: I1210 11:06:01.154035 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/alertmanager-metric-storage-0" Dec 10 11:06:01 crc kubenswrapper[4682]: I1210 11:06:01.159354 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/alertmanager-metric-storage-0" Dec 10 11:06:01 crc kubenswrapper[4682]: I1210 11:06:01.160426 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/prometheus-metric-storage-0" event={"ID":"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0","Type":"ContainerStarted","Data":"6ed67385c172ec049376897327238b1f3347082476bd951631ae94c313027901"} Dec 10 11:06:01 crc kubenswrapper[4682]: I1210 11:06:01.188334 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/alertmanager-metric-storage-0" podStartSLOduration=32.222498105 podStartE2EDuration="56.188305419s" podCreationTimestamp="2025-12-10 11:05:05 +0000 UTC" firstStartedPulling="2025-12-10 11:05:29.203246501 +0000 UTC m=+1209.523457251" lastFinishedPulling="2025-12-10 11:05:53.169053815 +0000 UTC m=+1233.489264565" observedRunningTime="2025-12-10 11:06:01.180823644 +0000 UTC m=+1241.501034404" watchObservedRunningTime="2025-12-10 11:06:01.188305419 +0000 UTC m=+1241.508516169" Dec 10 11:06:02 crc kubenswrapper[4682]: I1210 11:06:02.187612 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Dec 10 11:06:02 crc kubenswrapper[4682]: I1210 11:06:02.188001 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Dec 10 11:06:02 crc kubenswrapper[4682]: I1210 11:06:02.601097 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:06:02 crc kubenswrapper[4682]: E1210 11:06:02.601413 4682 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 11:06:02 crc kubenswrapper[4682]: E1210 11:06:02.601449 4682 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 11:06:02 crc kubenswrapper[4682]: E1210 11:06:02.601528 4682 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift podName:b7a82b72-0262-4a74-becf-36ead02cb92c nodeName:}" failed. No retries permitted until 2025-12-10 11:06:18.601506539 +0000 UTC m=+1258.921717289 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift") pod "swift-storage-0" (UID: "b7a82b72-0262-4a74-becf-36ead02cb92c") : configmap "swift-ring-files" not found Dec 10 11:06:02 crc kubenswrapper[4682]: I1210 11:06:02.985137 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Dec 10 11:06:02 crc kubenswrapper[4682]: I1210 11:06:02.985554 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Dec 10 11:06:03 crc kubenswrapper[4682]: I1210 11:06:03.177612 4682 generic.go:334] "Generic (PLEG): container finished" podID="c211ac37-0b53-466f-ad83-7062f681c32b" containerID="ffbe3adca9c0c62209b7671ab439ea1eb3795294266508f5997585d6ef992d4b" exitCode=0 Dec 10 11:06:03 crc kubenswrapper[4682]: I1210 11:06:03.177676 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c211ac37-0b53-466f-ad83-7062f681c32b","Type":"ContainerDied","Data":"ffbe3adca9c0c62209b7671ab439ea1eb3795294266508f5997585d6ef992d4b"} Dec 10 11:06:03 crc kubenswrapper[4682]: I1210 11:06:03.181020 4682 generic.go:334] "Generic (PLEG): container finished" podID="7362d622-686c-48e5-b0de-562fae10bc35" containerID="d334adfb35be27b46279f9611f45e3a210c4fdbf44ceb555dadc28eb89ea99ae" exitCode=0 Dec 10 11:06:03 crc kubenswrapper[4682]: I1210 11:06:03.181158 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7362d622-686c-48e5-b0de-562fae10bc35","Type":"ContainerDied","Data":"d334adfb35be27b46279f9611f45e3a210c4fdbf44ceb555dadc28eb89ea99ae"} Dec 10 11:06:04 crc kubenswrapper[4682]: I1210 11:06:04.190236 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7362d622-686c-48e5-b0de-562fae10bc35","Type":"ContainerStarted","Data":"91fdcc557b4d22e64f4b9de115191e912904338fe6401293a35d7d82e5808b58"} Dec 10 11:06:04 crc kubenswrapper[4682]: I1210 11:06:04.190954 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 10 11:06:04 crc kubenswrapper[4682]: I1210 11:06:04.192116 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c211ac37-0b53-466f-ad83-7062f681c32b","Type":"ContainerStarted","Data":"1310e0bb88be9b6509453f30f4cb31d52ffaf605a9ef457f9cdcab64c30a2c8f"} Dec 10 11:06:04 crc kubenswrapper[4682]: I1210 11:06:04.192264 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:06:04 crc kubenswrapper[4682]: I1210 11:06:04.223118 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.02388678 podStartE2EDuration="1m5.223098728s" podCreationTimestamp="2025-12-10 11:04:59 +0000 UTC" firstStartedPulling="2025-12-10 11:05:01.753500567 +0000 UTC m=+1182.073711317" lastFinishedPulling="2025-12-10 11:05:28.952712515 +0000 UTC m=+1209.272923265" observedRunningTime="2025-12-10 11:06:04.21583848 +0000 UTC m=+1244.536049240" watchObservedRunningTime="2025-12-10 11:06:04.223098728 +0000 UTC m=+1244.543309478" Dec 10 11:06:04 crc kubenswrapper[4682]: I1210 11:06:04.239982 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.341338287 podStartE2EDuration="1m5.239959326s" 
podCreationTimestamp="2025-12-10 11:04:59 +0000 UTC" firstStartedPulling="2025-12-10 11:05:02.07503332 +0000 UTC m=+1182.395244070" lastFinishedPulling="2025-12-10 11:05:28.973654359 +0000 UTC m=+1209.293865109" observedRunningTime="2025-12-10 11:06:04.239420679 +0000 UTC m=+1244.559631449" watchObservedRunningTime="2025-12-10 11:06:04.239959326 +0000 UTC m=+1244.560170086" Dec 10 11:06:05 crc kubenswrapper[4682]: I1210 11:06:05.152091 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Dec 10 11:06:05 crc kubenswrapper[4682]: I1210 11:06:05.271235 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Dec 10 11:06:06 crc kubenswrapper[4682]: I1210 11:06:06.214367 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0","Type":"ContainerStarted","Data":"d6f8b3e16818d5a71dd98e12a665333685c9b9cfa96fe76813f9ec95bcef87e5"} Dec 10 11:06:06 crc kubenswrapper[4682]: I1210 11:06:06.244288 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=24.27168043 podStartE2EDuration="1m1.244268156s" podCreationTimestamp="2025-12-10 11:05:05 +0000 UTC" firstStartedPulling="2025-12-10 11:05:28.953685856 +0000 UTC m=+1209.273896616" lastFinishedPulling="2025-12-10 11:06:05.926273592 +0000 UTC m=+1246.246484342" observedRunningTime="2025-12-10 11:06:06.237461913 +0000 UTC m=+1246.557672673" watchObservedRunningTime="2025-12-10 11:06:06.244268156 +0000 UTC m=+1246.564478906" Dec 10 11:06:06 crc kubenswrapper[4682]: I1210 11:06:06.632225 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:06 crc kubenswrapper[4682]: I1210 11:06:06.632273 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:06 crc kubenswrapper[4682]: I1210 11:06:06.634787 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:06 crc kubenswrapper[4682]: I1210 11:06:06.665873 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:06:06 crc kubenswrapper[4682]: I1210 11:06:06.717563 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-j6d45"] Dec 10 11:06:06 crc kubenswrapper[4682]: I1210 11:06:06.717784 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" podUID="bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8" containerName="dnsmasq-dns" containerID="cri-o://3471b045b620bb2b34b9814fc9e46ad102fa41d37a049588531b2c75b6396389" gracePeriod=10 Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.109681 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.227063 4682 generic.go:334] "Generic (PLEG): container finished" podID="bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8" containerID="3471b045b620bb2b34b9814fc9e46ad102fa41d37a049588531b2c75b6396389" exitCode=0 Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.227148 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" 
event={"ID":"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8","Type":"ContainerDied","Data":"3471b045b620bb2b34b9814fc9e46ad102fa41d37a049588531b2c75b6396389"} Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.227196 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" event={"ID":"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8","Type":"ContainerDied","Data":"b2edc23f4829464b3ca5aed596b32fc7293cdde5135ee9c40cf674d54eaa2319"} Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.227212 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2edc23f4829464b3ca5aed596b32fc7293cdde5135ee9c40cf674d54eaa2319" Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.230282 4682 generic.go:334] "Generic (PLEG): container finished" podID="4c235968-0ec4-4c4f-98c4-6b19fa58e826" containerID="f822f7223cc4082a8186f6fa10ee5509eb93a60c5d5dc5b1e5d37bd5df6fb85a" exitCode=0 Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.230368 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-n9b92" event={"ID":"4c235968-0ec4-4c4f-98c4-6b19fa58e826","Type":"ContainerDied","Data":"f822f7223cc4082a8186f6fa10ee5509eb93a60c5d5dc5b1e5d37bd5df6fb85a"} Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.240919 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.265268 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.273939 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.408316 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-dns-svc\") pod \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\" (UID: \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\") " Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.408417 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-config\") pod \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\" (UID: \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\") " Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.408456 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkrgx\" (UniqueName: \"kubernetes.io/projected/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-kube-api-access-qkrgx\") pod \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\" (UID: \"bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8\") " Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.419602 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-kube-api-access-qkrgx" (OuterVolumeSpecName: "kube-api-access-qkrgx") pod "bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8" (UID: "bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8"). InnerVolumeSpecName "kube-api-access-qkrgx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.499028 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-config" (OuterVolumeSpecName: "config") pod "bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8" (UID: "bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.509749 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8" (UID: "bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.510534 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.510564 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.510576 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkrgx\" (UniqueName: \"kubernetes.io/projected/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8-kube-api-access-qkrgx\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:07 crc kubenswrapper[4682]: I1210 11:06:07.588804 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-distributor-664b687b54-w4wxz" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.236080 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-j6d45" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.293674 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-j6d45"] Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.305965 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-j6d45"] Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.391850 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8" path="/var/lib/kubelet/pods/bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8/volumes" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.507254 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cloudkitty-lokistack-ingester-0" podUID="ea1f94a0-5b00-4aac-85ae-f7af9df196b6" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.660078 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.731341 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-swiftconf\") pod \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.731454 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/4c235968-0ec4-4c4f-98c4-6b19fa58e826-ring-data-devices\") pod \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.731574 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-combined-ca-bundle\") pod \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.731615 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c235968-0ec4-4c4f-98c4-6b19fa58e826-scripts\") pod \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.731654 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-dispersionconf\") pod \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.731691 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/4c235968-0ec4-4c4f-98c4-6b19fa58e826-etc-swift\") pod \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.731801 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhfwg\" (UniqueName: \"kubernetes.io/projected/4c235968-0ec4-4c4f-98c4-6b19fa58e826-kube-api-access-jhfwg\") pod \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\" (UID: \"4c235968-0ec4-4c4f-98c4-6b19fa58e826\") " Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.731920 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c235968-0ec4-4c4f-98c4-6b19fa58e826-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "4c235968-0ec4-4c4f-98c4-6b19fa58e826" (UID: "4c235968-0ec4-4c4f-98c4-6b19fa58e826"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.732279 4682 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/4c235968-0ec4-4c4f-98c4-6b19fa58e826-ring-data-devices\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.736587 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c235968-0ec4-4c4f-98c4-6b19fa58e826-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "4c235968-0ec4-4c4f-98c4-6b19fa58e826" (UID: "4c235968-0ec4-4c4f-98c4-6b19fa58e826"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.739084 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c235968-0ec4-4c4f-98c4-6b19fa58e826-kube-api-access-jhfwg" (OuterVolumeSpecName: "kube-api-access-jhfwg") pod "4c235968-0ec4-4c4f-98c4-6b19fa58e826" (UID: "4c235968-0ec4-4c4f-98c4-6b19fa58e826"). InnerVolumeSpecName "kube-api-access-jhfwg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.781697 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c235968-0ec4-4c4f-98c4-6b19fa58e826-scripts" (OuterVolumeSpecName: "scripts") pod "4c235968-0ec4-4c4f-98c4-6b19fa58e826" (UID: "4c235968-0ec4-4c4f-98c4-6b19fa58e826"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.783333 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "4c235968-0ec4-4c4f-98c4-6b19fa58e826" (UID: "4c235968-0ec4-4c4f-98c4-6b19fa58e826"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.787982 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c235968-0ec4-4c4f-98c4-6b19fa58e826" (UID: "4c235968-0ec4-4c4f-98c4-6b19fa58e826"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.791666 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "4c235968-0ec4-4c4f-98c4-6b19fa58e826" (UID: "4c235968-0ec4-4c4f-98c4-6b19fa58e826"). InnerVolumeSpecName "dispersionconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.834392 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.834425 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c235968-0ec4-4c4f-98c4-6b19fa58e826-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.834433 4682 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-dispersionconf\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.834443 4682 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/4c235968-0ec4-4c4f-98c4-6b19fa58e826-etc-swift\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.834452 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhfwg\" (UniqueName: \"kubernetes.io/projected/4c235968-0ec4-4c4f-98c4-6b19fa58e826-kube-api-access-jhfwg\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.834460 4682 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/4c235968-0ec4-4c4f-98c4-6b19fa58e826-swiftconf\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.906403 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-c2zgs"] Dec 10 11:06:08 crc kubenswrapper[4682]: E1210 11:06:08.906987 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edac3774-26dc-4253-8c10-9400ae914cfd" containerName="init" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.907007 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="edac3774-26dc-4253-8c10-9400ae914cfd" containerName="init" Dec 10 11:06:08 crc kubenswrapper[4682]: E1210 11:06:08.907034 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8" containerName="dnsmasq-dns" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.907042 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8" containerName="dnsmasq-dns" Dec 10 11:06:08 crc kubenswrapper[4682]: E1210 11:06:08.907060 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8" containerName="init" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.907066 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8" containerName="init" Dec 10 11:06:08 crc kubenswrapper[4682]: E1210 11:06:08.907078 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edac3774-26dc-4253-8c10-9400ae914cfd" containerName="dnsmasq-dns" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.907085 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="edac3774-26dc-4253-8c10-9400ae914cfd" containerName="dnsmasq-dns" Dec 10 11:06:08 crc kubenswrapper[4682]: E1210 11:06:08.907103 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c235968-0ec4-4c4f-98c4-6b19fa58e826" containerName="swift-ring-rebalance" Dec 10 11:06:08 crc 
kubenswrapper[4682]: I1210 11:06:08.907109 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c235968-0ec4-4c4f-98c4-6b19fa58e826" containerName="swift-ring-rebalance" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.907408 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf098ad9-7ec1-4d6c-85a9-a28ae9ba90a8" containerName="dnsmasq-dns" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.907425 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c235968-0ec4-4c4f-98c4-6b19fa58e826" containerName="swift-ring-rebalance" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.907441 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="edac3774-26dc-4253-8c10-9400ae914cfd" containerName="dnsmasq-dns" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.908742 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-c2zgs" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.932250 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-1423-account-create-update-lzxsx"] Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.933571 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-1423-account-create-update-lzxsx" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.936251 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.936410 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8qgn\" (UniqueName: \"kubernetes.io/projected/722a4519-42a8-4f50-8665-59e8bb94a134-kube-api-access-w8qgn\") pod \"glance-db-create-c2zgs\" (UID: \"722a4519-42a8-4f50-8665-59e8bb94a134\") " pod="openstack/glance-db-create-c2zgs" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.936895 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/722a4519-42a8-4f50-8665-59e8bb94a134-operator-scripts\") pod \"glance-db-create-c2zgs\" (UID: \"722a4519-42a8-4f50-8665-59e8bb94a134\") " pod="openstack/glance-db-create-c2zgs" Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.941165 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-c2zgs"] Dec 10 11:06:08 crc kubenswrapper[4682]: I1210 11:06:08.954771 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-1423-account-create-update-lzxsx"] Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.038172 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/722a4519-42a8-4f50-8665-59e8bb94a134-operator-scripts\") pod \"glance-db-create-c2zgs\" (UID: \"722a4519-42a8-4f50-8665-59e8bb94a134\") " pod="openstack/glance-db-create-c2zgs" Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.038264 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dba42c85-eeda-4249-8026-6581d57f8dcf-operator-scripts\") pod \"glance-1423-account-create-update-lzxsx\" (UID: \"dba42c85-eeda-4249-8026-6581d57f8dcf\") " pod="openstack/glance-1423-account-create-update-lzxsx" Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.038295 4682 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktrwh\" (UniqueName: \"kubernetes.io/projected/dba42c85-eeda-4249-8026-6581d57f8dcf-kube-api-access-ktrwh\") pod \"glance-1423-account-create-update-lzxsx\" (UID: \"dba42c85-eeda-4249-8026-6581d57f8dcf\") " pod="openstack/glance-1423-account-create-update-lzxsx" Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.038392 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8qgn\" (UniqueName: \"kubernetes.io/projected/722a4519-42a8-4f50-8665-59e8bb94a134-kube-api-access-w8qgn\") pod \"glance-db-create-c2zgs\" (UID: \"722a4519-42a8-4f50-8665-59e8bb94a134\") " pod="openstack/glance-db-create-c2zgs" Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.038934 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/722a4519-42a8-4f50-8665-59e8bb94a134-operator-scripts\") pod \"glance-db-create-c2zgs\" (UID: \"722a4519-42a8-4f50-8665-59e8bb94a134\") " pod="openstack/glance-db-create-c2zgs" Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.060483 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8qgn\" (UniqueName: \"kubernetes.io/projected/722a4519-42a8-4f50-8665-59e8bb94a134-kube-api-access-w8qgn\") pod \"glance-db-create-c2zgs\" (UID: \"722a4519-42a8-4f50-8665-59e8bb94a134\") " pod="openstack/glance-db-create-c2zgs" Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.140093 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dba42c85-eeda-4249-8026-6581d57f8dcf-operator-scripts\") pod \"glance-1423-account-create-update-lzxsx\" (UID: \"dba42c85-eeda-4249-8026-6581d57f8dcf\") " pod="openstack/glance-1423-account-create-update-lzxsx" Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.140375 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktrwh\" (UniqueName: \"kubernetes.io/projected/dba42c85-eeda-4249-8026-6581d57f8dcf-kube-api-access-ktrwh\") pod \"glance-1423-account-create-update-lzxsx\" (UID: \"dba42c85-eeda-4249-8026-6581d57f8dcf\") " pod="openstack/glance-1423-account-create-update-lzxsx" Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.140914 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dba42c85-eeda-4249-8026-6581d57f8dcf-operator-scripts\") pod \"glance-1423-account-create-update-lzxsx\" (UID: \"dba42c85-eeda-4249-8026-6581d57f8dcf\") " pod="openstack/glance-1423-account-create-update-lzxsx" Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.163092 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktrwh\" (UniqueName: \"kubernetes.io/projected/dba42c85-eeda-4249-8026-6581d57f8dcf-kube-api-access-ktrwh\") pod \"glance-1423-account-create-update-lzxsx\" (UID: \"dba42c85-eeda-4249-8026-6581d57f8dcf\") " pod="openstack/glance-1423-account-create-update-lzxsx" Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.232236 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-c2zgs" Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.244775 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-n9b92" event={"ID":"4c235968-0ec4-4c4f-98c4-6b19fa58e826","Type":"ContainerDied","Data":"50979db329d65936b92afb631b2e06e6922fab5713756f9eb6477cecd477441e"} Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.244813 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50979db329d65936b92afb631b2e06e6922fab5713756f9eb6477cecd477441e" Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.244812 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-n9b92" Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.253854 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-1423-account-create-update-lzxsx" Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.808899 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-1423-account-create-update-lzxsx"] Dec 10 11:06:09 crc kubenswrapper[4682]: W1210 11:06:09.810661 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddba42c85_eeda_4249_8026_6581d57f8dcf.slice/crio-cd8dc470eebd9fa5dcb54bc2d4a968fbf18cb2dd6f1a1ea0539dc5c884088b9b WatchSource:0}: Error finding container cd8dc470eebd9fa5dcb54bc2d4a968fbf18cb2dd6f1a1ea0539dc5c884088b9b: Status 404 returned error can't find the container with id cd8dc470eebd9fa5dcb54bc2d4a968fbf18cb2dd6f1a1ea0539dc5c884088b9b Dec 10 11:06:09 crc kubenswrapper[4682]: I1210 11:06:09.874118 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-c2zgs"] Dec 10 11:06:09 crc kubenswrapper[4682]: W1210 11:06:09.878892 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod722a4519_42a8_4f50_8665_59e8bb94a134.slice/crio-f74eebb1ab0e48a883728e7509d52719e9e3a4c12ecf1c8a54fa847348b1f3b9 WatchSource:0}: Error finding container f74eebb1ab0e48a883728e7509d52719e9e3a4c12ecf1c8a54fa847348b1f3b9: Status 404 returned error can't find the container with id f74eebb1ab0e48a883728e7509d52719e9e3a4c12ecf1c8a54fa847348b1f3b9 Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.256545 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-c2zgs" event={"ID":"722a4519-42a8-4f50-8665-59e8bb94a134","Type":"ContainerStarted","Data":"f74eebb1ab0e48a883728e7509d52719e9e3a4c12ecf1c8a54fa847348b1f3b9"} Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.258725 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1423-account-create-update-lzxsx" event={"ID":"dba42c85-eeda-4249-8026-6581d57f8dcf","Type":"ContainerStarted","Data":"cd8dc470eebd9fa5dcb54bc2d4a968fbf18cb2dd6f1a1ea0539dc5c884088b9b"} Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.322214 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-w7jxw" podUID="df9d7d76-fa02-41c5-b652-ea9b7b00bd00" containerName="ovn-controller" probeResult="failure" output=< Dec 10 11:06:10 crc kubenswrapper[4682]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Dec 10 11:06:10 crc kubenswrapper[4682]: > Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.338623 4682 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.344229 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-4b2ch" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.435253 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.435554 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerName="prometheus" containerID="cri-o://a558d36e3f414458f9ccf78c0375238c90ad6b90a8d9d3ab2fceebae9e3999d6" gracePeriod=600 Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.435617 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerName="thanos-sidecar" containerID="cri-o://d6f8b3e16818d5a71dd98e12a665333685c9b9cfa96fe76813f9ec95bcef87e5" gracePeriod=600 Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.435684 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerName="config-reloader" containerID="cri-o://6ed67385c172ec049376897327238b1f3347082476bd951631ae94c313027901" gracePeriod=600 Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.635977 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-w7jxw-config-fjlkf"] Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.637114 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.640372 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.662657 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-w7jxw-config-fjlkf"] Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.690112 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-run-ovn\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.690189 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzhg8\" (UniqueName: \"kubernetes.io/projected/d4794f5b-e93f-4adf-93ba-09e3394e962d-kube-api-access-dzhg8\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.690272 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d4794f5b-e93f-4adf-93ba-09e3394e962d-additional-scripts\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.690393 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-log-ovn\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.690422 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d4794f5b-e93f-4adf-93ba-09e3394e962d-scripts\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.690548 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-run\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.792710 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-run\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.793003 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-run\") pod 
\"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.793026 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-run-ovn\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.793060 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzhg8\" (UniqueName: \"kubernetes.io/projected/d4794f5b-e93f-4adf-93ba-09e3394e962d-kube-api-access-dzhg8\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.793105 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d4794f5b-e93f-4adf-93ba-09e3394e962d-additional-scripts\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.793146 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-run-ovn\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.793161 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-log-ovn\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.793180 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d4794f5b-e93f-4adf-93ba-09e3394e962d-scripts\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.793515 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-log-ovn\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.793945 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d4794f5b-e93f-4adf-93ba-09e3394e962d-additional-scripts\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.795021 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d4794f5b-e93f-4adf-93ba-09e3394e962d-scripts\") pod 
\"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.810634 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzhg8\" (UniqueName: \"kubernetes.io/projected/d4794f5b-e93f-4adf-93ba-09e3394e962d-kube-api-access-dzhg8\") pod \"ovn-controller-w7jxw-config-fjlkf\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:10 crc kubenswrapper[4682]: I1210 11:06:10.966562 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.286285 4682 generic.go:334] "Generic (PLEG): container finished" podID="dba42c85-eeda-4249-8026-6581d57f8dcf" containerID="1b76cc41cf90775abb0f780a203fa619cb5730b0315f5693ad956531adcfe62c" exitCode=0 Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.286401 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1423-account-create-update-lzxsx" event={"ID":"dba42c85-eeda-4249-8026-6581d57f8dcf","Type":"ContainerDied","Data":"1b76cc41cf90775abb0f780a203fa619cb5730b0315f5693ad956531adcfe62c"} Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.295306 4682 generic.go:334] "Generic (PLEG): container finished" podID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerID="d6f8b3e16818d5a71dd98e12a665333685c9b9cfa96fe76813f9ec95bcef87e5" exitCode=0 Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.295352 4682 generic.go:334] "Generic (PLEG): container finished" podID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerID="6ed67385c172ec049376897327238b1f3347082476bd951631ae94c313027901" exitCode=0 Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.295361 4682 generic.go:334] "Generic (PLEG): container finished" podID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerID="a558d36e3f414458f9ccf78c0375238c90ad6b90a8d9d3ab2fceebae9e3999d6" exitCode=0 Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.295401 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0","Type":"ContainerDied","Data":"d6f8b3e16818d5a71dd98e12a665333685c9b9cfa96fe76813f9ec95bcef87e5"} Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.295493 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0","Type":"ContainerDied","Data":"6ed67385c172ec049376897327238b1f3347082476bd951631ae94c313027901"} Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.295508 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0","Type":"ContainerDied","Data":"a558d36e3f414458f9ccf78c0375238c90ad6b90a8d9d3ab2fceebae9e3999d6"} Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.296932 4682 generic.go:334] "Generic (PLEG): container finished" podID="722a4519-42a8-4f50-8665-59e8bb94a134" containerID="3acb44d1538c9f2a339233094a9bed57332635b97b9cbec3d21db0c657f086c9" exitCode=0 Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.297065 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-c2zgs" 
event={"ID":"722a4519-42a8-4f50-8665-59e8bb94a134","Type":"ContainerDied","Data":"3acb44d1538c9f2a339233094a9bed57332635b97b9cbec3d21db0c657f086c9"} Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.449285 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-w7jxw-config-fjlkf"] Dec 10 11:06:11 crc kubenswrapper[4682]: W1210 11:06:11.456600 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd4794f5b_e93f_4adf_93ba_09e3394e962d.slice/crio-2ab29b1a5b03892c1731c2b5929746007c3fe5a6f7480df2c3b3455501823579 WatchSource:0}: Error finding container 2ab29b1a5b03892c1731c2b5929746007c3fe5a6f7480df2c3b3455501823579: Status 404 returned error can't find the container with id 2ab29b1a5b03892c1731c2b5929746007c3fe5a6f7480df2c3b3455501823579 Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.483160 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.618960 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\") pod \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.619460 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-config\") pod \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.619527 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-prometheus-metric-storage-rulefiles-0\") pod \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.619575 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tswqz\" (UniqueName: \"kubernetes.io/projected/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-kube-api-access-tswqz\") pod \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.619599 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-thanos-prometheus-http-client-file\") pod \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.619675 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-config-out\") pod \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.619753 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-tls-assets\") pod 
\"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.619783 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-web-config\") pod \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\" (UID: \"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0\") " Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.623032 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" (UID: "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.626760 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-config" (OuterVolumeSpecName: "config") pod "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" (UID: "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.631190 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-config-out" (OuterVolumeSpecName: "config-out") pod "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" (UID: "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.632664 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" (UID: "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.651816 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-kube-api-access-tswqz" (OuterVolumeSpecName: "kube-api-access-tswqz") pod "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" (UID: "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0"). InnerVolumeSpecName "kube-api-access-tswqz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.668698 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" (UID: "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0"). InnerVolumeSpecName "tls-assets". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.722210 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.722238 4682 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.722251 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tswqz\" (UniqueName: \"kubernetes.io/projected/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-kube-api-access-tswqz\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.722260 4682 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.722269 4682 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-config-out\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.722277 4682 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-tls-assets\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.748204 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-web-config" (OuterVolumeSpecName: "web-config") pod "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" (UID: "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.823514 4682 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0-web-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.847132 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" (UID: "d67c4ad0-1464-4f7f-9877-8601f9b2c3b0"). InnerVolumeSpecName "pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.851570 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.925709 4682 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\") on node \"crc\" " Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.953966 4682 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Dec 10 11:06:11 crc kubenswrapper[4682]: I1210 11:06:11.954217 4682 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca") on node "crc" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.027658 4682 reconciler_common.go:293] "Volume detached for volume \"pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.306731 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-w7jxw-config-fjlkf" event={"ID":"d4794f5b-e93f-4adf-93ba-09e3394e962d","Type":"ContainerStarted","Data":"f6642c6e0e77eab4cf0f50f0ad9415f9d054c4805befea4b95b5565ffc58354a"} Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.306777 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-w7jxw-config-fjlkf" event={"ID":"d4794f5b-e93f-4adf-93ba-09e3394e962d","Type":"ContainerStarted","Data":"2ab29b1a5b03892c1731c2b5929746007c3fe5a6f7480df2c3b3455501823579"} Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.310169 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d67c4ad0-1464-4f7f-9877-8601f9b2c3b0","Type":"ContainerDied","Data":"41a7f696dce11586a37cad24c828eb6441092497c1ed4aed4c1fcd770e20cc12"} Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.310204 4682 scope.go:117] "RemoveContainer" containerID="d6f8b3e16818d5a71dd98e12a665333685c9b9cfa96fe76813f9ec95bcef87e5" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.310329 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.370754 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-w7jxw-config-fjlkf" podStartSLOduration=2.370738045 podStartE2EDuration="2.370738045s" podCreationTimestamp="2025-12-10 11:06:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:06:12.329399349 +0000 UTC m=+1252.649610099" watchObservedRunningTime="2025-12-10 11:06:12.370738045 +0000 UTC m=+1252.690948795" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.377687 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.385851 4682 scope.go:117] "RemoveContainer" containerID="6ed67385c172ec049376897327238b1f3347082476bd951631ae94c313027901" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.394560 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.440148 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:06:12 crc kubenswrapper[4682]: E1210 11:06:12.440602 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerName="init-config-reloader" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.440624 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerName="init-config-reloader" Dec 10 11:06:12 crc kubenswrapper[4682]: E1210 11:06:12.440644 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerName="thanos-sidecar" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.440653 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerName="thanos-sidecar" Dec 10 11:06:12 crc kubenswrapper[4682]: E1210 11:06:12.440674 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerName="config-reloader" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.440683 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerName="config-reloader" Dec 10 11:06:12 crc kubenswrapper[4682]: E1210 11:06:12.440702 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerName="prometheus" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.440710 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerName="prometheus" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.440929 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerName="thanos-sidecar" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.440947 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerName="prometheus" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.440971 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" containerName="config-reloader" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 
11:06:12.442940 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.445986 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.446218 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.446815 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-hfkrt" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.446963 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.447951 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.448072 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.454065 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.459296 4682 scope.go:117] "RemoveContainer" containerID="a558d36e3f414458f9ccf78c0375238c90ad6b90a8d9d3ab2fceebae9e3999d6" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.474103 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.524560 4682 scope.go:117] "RemoveContainer" containerID="262a2eaa41ed02832be58b796ae0f29f7a98d824957a7877a0377701f25f0224" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.538663 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.538851 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.539025 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.539055 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"tls-assets\" (UniqueName: \"kubernetes.io/projected/f598ce2d-df0a-4477-8c89-126cc5d3a5be-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.539168 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5t2t\" (UniqueName: \"kubernetes.io/projected/f598ce2d-df0a-4477-8c89-126cc5d3a5be-kube-api-access-h5t2t\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.539238 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.539281 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/f598ce2d-df0a-4477-8c89-126cc5d3a5be-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.539315 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.539350 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-config\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.539454 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/f598ce2d-df0a-4477-8c89-126cc5d3a5be-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.539703 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.641549 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 
crc kubenswrapper[4682]: I1210 11:06:12.641829 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.641852 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.641917 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/f598ce2d-df0a-4477-8c89-126cc5d3a5be-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.641935 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.641973 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5t2t\" (UniqueName: \"kubernetes.io/projected/f598ce2d-df0a-4477-8c89-126cc5d3a5be-kube-api-access-h5t2t\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.642122 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.642211 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/f598ce2d-df0a-4477-8c89-126cc5d3a5be-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.642270 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.642308 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-config\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.642376 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/f598ce2d-df0a-4477-8c89-126cc5d3a5be-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.643699 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/f598ce2d-df0a-4477-8c89-126cc5d3a5be-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.654254 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/f598ce2d-df0a-4477-8c89-126cc5d3a5be-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.655111 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.655164 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.655523 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.655912 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.655956 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8e1318f74973771f6a5154c276126f7c0f974665b019a5b52a8da0720271e292/globalmount\"" pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.658314 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-config\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.658910 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/f598ce2d-df0a-4477-8c89-126cc5d3a5be-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.661806 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.665310 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/f598ce2d-df0a-4477-8c89-126cc5d3a5be-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.673984 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5t2t\" (UniqueName: \"kubernetes.io/projected/f598ce2d-df0a-4477-8c89-126cc5d3a5be-kube-api-access-h5t2t\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.707491 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f5cb31ad-a2f7-4123-ac28-96fe03ebc1ca\") pod \"prometheus-metric-storage-0\" (UID: \"f598ce2d-df0a-4477-8c89-126cc5d3a5be\") " pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.782118 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.877349 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-c2zgs" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.914164 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-1423-account-create-update-lzxsx" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.949427 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ktrwh\" (UniqueName: \"kubernetes.io/projected/dba42c85-eeda-4249-8026-6581d57f8dcf-kube-api-access-ktrwh\") pod \"dba42c85-eeda-4249-8026-6581d57f8dcf\" (UID: \"dba42c85-eeda-4249-8026-6581d57f8dcf\") " Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.949499 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8qgn\" (UniqueName: \"kubernetes.io/projected/722a4519-42a8-4f50-8665-59e8bb94a134-kube-api-access-w8qgn\") pod \"722a4519-42a8-4f50-8665-59e8bb94a134\" (UID: \"722a4519-42a8-4f50-8665-59e8bb94a134\") " Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.949610 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dba42c85-eeda-4249-8026-6581d57f8dcf-operator-scripts\") pod \"dba42c85-eeda-4249-8026-6581d57f8dcf\" (UID: \"dba42c85-eeda-4249-8026-6581d57f8dcf\") " Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.949639 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/722a4519-42a8-4f50-8665-59e8bb94a134-operator-scripts\") pod \"722a4519-42a8-4f50-8665-59e8bb94a134\" (UID: \"722a4519-42a8-4f50-8665-59e8bb94a134\") " Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.950593 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/722a4519-42a8-4f50-8665-59e8bb94a134-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "722a4519-42a8-4f50-8665-59e8bb94a134" (UID: "722a4519-42a8-4f50-8665-59e8bb94a134"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.951077 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dba42c85-eeda-4249-8026-6581d57f8dcf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dba42c85-eeda-4249-8026-6581d57f8dcf" (UID: "dba42c85-eeda-4249-8026-6581d57f8dcf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.956418 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dba42c85-eeda-4249-8026-6581d57f8dcf-kube-api-access-ktrwh" (OuterVolumeSpecName: "kube-api-access-ktrwh") pod "dba42c85-eeda-4249-8026-6581d57f8dcf" (UID: "dba42c85-eeda-4249-8026-6581d57f8dcf"). InnerVolumeSpecName "kube-api-access-ktrwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:12 crc kubenswrapper[4682]: I1210 11:06:12.956479 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/722a4519-42a8-4f50-8665-59e8bb94a134-kube-api-access-w8qgn" (OuterVolumeSpecName: "kube-api-access-w8qgn") pod "722a4519-42a8-4f50-8665-59e8bb94a134" (UID: "722a4519-42a8-4f50-8665-59e8bb94a134"). InnerVolumeSpecName "kube-api-access-w8qgn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.052039 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dba42c85-eeda-4249-8026-6581d57f8dcf-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.052077 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/722a4519-42a8-4f50-8665-59e8bb94a134-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.052087 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ktrwh\" (UniqueName: \"kubernetes.io/projected/dba42c85-eeda-4249-8026-6581d57f8dcf-kube-api-access-ktrwh\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.052098 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8qgn\" (UniqueName: \"kubernetes.io/projected/722a4519-42a8-4f50-8665-59e8bb94a134-kube-api-access-w8qgn\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.115579 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-jrfpg"] Dec 10 11:06:13 crc kubenswrapper[4682]: E1210 11:06:13.116043 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dba42c85-eeda-4249-8026-6581d57f8dcf" containerName="mariadb-account-create-update" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.116067 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="dba42c85-eeda-4249-8026-6581d57f8dcf" containerName="mariadb-account-create-update" Dec 10 11:06:13 crc kubenswrapper[4682]: E1210 11:06:13.116089 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="722a4519-42a8-4f50-8665-59e8bb94a134" containerName="mariadb-database-create" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.116096 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="722a4519-42a8-4f50-8665-59e8bb94a134" containerName="mariadb-database-create" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.116302 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="dba42c85-eeda-4249-8026-6581d57f8dcf" containerName="mariadb-account-create-update" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.116329 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="722a4519-42a8-4f50-8665-59e8bb94a134" containerName="mariadb-database-create" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.117177 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-jrfpg" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.122868 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-jrfpg"] Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.154682 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5dvq\" (UniqueName: \"kubernetes.io/projected/0dfa068f-e434-4d53-97c8-44f153f4847f-kube-api-access-c5dvq\") pod \"keystone-db-create-jrfpg\" (UID: \"0dfa068f-e434-4d53-97c8-44f153f4847f\") " pod="openstack/keystone-db-create-jrfpg" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.154800 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0dfa068f-e434-4d53-97c8-44f153f4847f-operator-scripts\") pod \"keystone-db-create-jrfpg\" (UID: \"0dfa068f-e434-4d53-97c8-44f153f4847f\") " pod="openstack/keystone-db-create-jrfpg" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.196783 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-d60a-account-create-update-cf5pj"] Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.198105 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-d60a-account-create-update-cf5pj" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.202780 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.209271 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-d60a-account-create-update-cf5pj"] Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.256616 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5dvq\" (UniqueName: \"kubernetes.io/projected/0dfa068f-e434-4d53-97c8-44f153f4847f-kube-api-access-c5dvq\") pod \"keystone-db-create-jrfpg\" (UID: \"0dfa068f-e434-4d53-97c8-44f153f4847f\") " pod="openstack/keystone-db-create-jrfpg" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.256716 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10c2d035-3ea8-46a7-9380-0bbe5d729bfe-operator-scripts\") pod \"keystone-d60a-account-create-update-cf5pj\" (UID: \"10c2d035-3ea8-46a7-9380-0bbe5d729bfe\") " pod="openstack/keystone-d60a-account-create-update-cf5pj" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.256793 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0dfa068f-e434-4d53-97c8-44f153f4847f-operator-scripts\") pod \"keystone-db-create-jrfpg\" (UID: \"0dfa068f-e434-4d53-97c8-44f153f4847f\") " pod="openstack/keystone-db-create-jrfpg" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.256821 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jp9g2\" (UniqueName: \"kubernetes.io/projected/10c2d035-3ea8-46a7-9380-0bbe5d729bfe-kube-api-access-jp9g2\") pod \"keystone-d60a-account-create-update-cf5pj\" (UID: \"10c2d035-3ea8-46a7-9380-0bbe5d729bfe\") " pod="openstack/keystone-d60a-account-create-update-cf5pj" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.257916 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0dfa068f-e434-4d53-97c8-44f153f4847f-operator-scripts\") pod \"keystone-db-create-jrfpg\" (UID: \"0dfa068f-e434-4d53-97c8-44f153f4847f\") " pod="openstack/keystone-db-create-jrfpg" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.276205 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5dvq\" (UniqueName: \"kubernetes.io/projected/0dfa068f-e434-4d53-97c8-44f153f4847f-kube-api-access-c5dvq\") pod \"keystone-db-create-jrfpg\" (UID: \"0dfa068f-e434-4d53-97c8-44f153f4847f\") " pod="openstack/keystone-db-create-jrfpg" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.322700 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.324814 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-c2zgs" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.324850 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-c2zgs" event={"ID":"722a4519-42a8-4f50-8665-59e8bb94a134","Type":"ContainerDied","Data":"f74eebb1ab0e48a883728e7509d52719e9e3a4c12ecf1c8a54fa847348b1f3b9"} Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.324879 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f74eebb1ab0e48a883728e7509d52719e9e3a4c12ecf1c8a54fa847348b1f3b9" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.331736 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1423-account-create-update-lzxsx" event={"ID":"dba42c85-eeda-4249-8026-6581d57f8dcf","Type":"ContainerDied","Data":"cd8dc470eebd9fa5dcb54bc2d4a968fbf18cb2dd6f1a1ea0539dc5c884088b9b"} Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.331771 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd8dc470eebd9fa5dcb54bc2d4a968fbf18cb2dd6f1a1ea0539dc5c884088b9b" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.331754 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-1423-account-create-update-lzxsx" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.333891 4682 generic.go:334] "Generic (PLEG): container finished" podID="d4794f5b-e93f-4adf-93ba-09e3394e962d" containerID="f6642c6e0e77eab4cf0f50f0ad9415f9d054c4805befea4b95b5565ffc58354a" exitCode=0 Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.333949 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-w7jxw-config-fjlkf" event={"ID":"d4794f5b-e93f-4adf-93ba-09e3394e962d","Type":"ContainerDied","Data":"f6642c6e0e77eab4cf0f50f0ad9415f9d054c4805befea4b95b5565ffc58354a"} Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.358235 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10c2d035-3ea8-46a7-9380-0bbe5d729bfe-operator-scripts\") pod \"keystone-d60a-account-create-update-cf5pj\" (UID: \"10c2d035-3ea8-46a7-9380-0bbe5d729bfe\") " pod="openstack/keystone-d60a-account-create-update-cf5pj" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.358398 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jp9g2\" (UniqueName: \"kubernetes.io/projected/10c2d035-3ea8-46a7-9380-0bbe5d729bfe-kube-api-access-jp9g2\") pod \"keystone-d60a-account-create-update-cf5pj\" (UID: \"10c2d035-3ea8-46a7-9380-0bbe5d729bfe\") " pod="openstack/keystone-d60a-account-create-update-cf5pj" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.359667 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10c2d035-3ea8-46a7-9380-0bbe5d729bfe-operator-scripts\") pod \"keystone-d60a-account-create-update-cf5pj\" (UID: \"10c2d035-3ea8-46a7-9380-0bbe5d729bfe\") " pod="openstack/keystone-d60a-account-create-update-cf5pj" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.377098 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jp9g2\" (UniqueName: \"kubernetes.io/projected/10c2d035-3ea8-46a7-9380-0bbe5d729bfe-kube-api-access-jp9g2\") pod \"keystone-d60a-account-create-update-cf5pj\" (UID: \"10c2d035-3ea8-46a7-9380-0bbe5d729bfe\") " pod="openstack/keystone-d60a-account-create-update-cf5pj" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.440848 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-jrfpg" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.493350 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-49tzp"] Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.494809 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-49tzp" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.509188 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-b00f-account-create-update-gvhl8"] Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.512850 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-b00f-account-create-update-gvhl8" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.515726 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.516691 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-d60a-account-create-update-cf5pj" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.520877 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-49tzp"] Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.536895 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-b00f-account-create-update-gvhl8"] Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.566991 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fa69a882-93ca-452f-9be6-2efc7b53f838-operator-scripts\") pod \"placement-b00f-account-create-update-gvhl8\" (UID: \"fa69a882-93ca-452f-9be6-2efc7b53f838\") " pod="openstack/placement-b00f-account-create-update-gvhl8" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.567054 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a292c622-0bd6-436d-95b4-8ca5e643fe10-operator-scripts\") pod \"placement-db-create-49tzp\" (UID: \"a292c622-0bd6-436d-95b4-8ca5e643fe10\") " pod="openstack/placement-db-create-49tzp" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.567080 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zh284\" (UniqueName: \"kubernetes.io/projected/a292c622-0bd6-436d-95b4-8ca5e643fe10-kube-api-access-zh284\") pod \"placement-db-create-49tzp\" (UID: \"a292c622-0bd6-436d-95b4-8ca5e643fe10\") " pod="openstack/placement-db-create-49tzp" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.567099 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzgxq\" (UniqueName: \"kubernetes.io/projected/fa69a882-93ca-452f-9be6-2efc7b53f838-kube-api-access-rzgxq\") pod \"placement-b00f-account-create-update-gvhl8\" (UID: \"fa69a882-93ca-452f-9be6-2efc7b53f838\") " pod="openstack/placement-b00f-account-create-update-gvhl8" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.673806 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fa69a882-93ca-452f-9be6-2efc7b53f838-operator-scripts\") pod \"placement-b00f-account-create-update-gvhl8\" (UID: \"fa69a882-93ca-452f-9be6-2efc7b53f838\") " pod="openstack/placement-b00f-account-create-update-gvhl8" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.673891 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a292c622-0bd6-436d-95b4-8ca5e643fe10-operator-scripts\") pod \"placement-db-create-49tzp\" (UID: \"a292c622-0bd6-436d-95b4-8ca5e643fe10\") " pod="openstack/placement-db-create-49tzp" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.673921 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zh284\" (UniqueName: \"kubernetes.io/projected/a292c622-0bd6-436d-95b4-8ca5e643fe10-kube-api-access-zh284\") pod \"placement-db-create-49tzp\" (UID: \"a292c622-0bd6-436d-95b4-8ca5e643fe10\") " pod="openstack/placement-db-create-49tzp" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.673943 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzgxq\" (UniqueName: 
\"kubernetes.io/projected/fa69a882-93ca-452f-9be6-2efc7b53f838-kube-api-access-rzgxq\") pod \"placement-b00f-account-create-update-gvhl8\" (UID: \"fa69a882-93ca-452f-9be6-2efc7b53f838\") " pod="openstack/placement-b00f-account-create-update-gvhl8" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.675168 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fa69a882-93ca-452f-9be6-2efc7b53f838-operator-scripts\") pod \"placement-b00f-account-create-update-gvhl8\" (UID: \"fa69a882-93ca-452f-9be6-2efc7b53f838\") " pod="openstack/placement-b00f-account-create-update-gvhl8" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.675715 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a292c622-0bd6-436d-95b4-8ca5e643fe10-operator-scripts\") pod \"placement-db-create-49tzp\" (UID: \"a292c622-0bd6-436d-95b4-8ca5e643fe10\") " pod="openstack/placement-db-create-49tzp" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.693100 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzgxq\" (UniqueName: \"kubernetes.io/projected/fa69a882-93ca-452f-9be6-2efc7b53f838-kube-api-access-rzgxq\") pod \"placement-b00f-account-create-update-gvhl8\" (UID: \"fa69a882-93ca-452f-9be6-2efc7b53f838\") " pod="openstack/placement-b00f-account-create-update-gvhl8" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.697214 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zh284\" (UniqueName: \"kubernetes.io/projected/a292c622-0bd6-436d-95b4-8ca5e643fe10-kube-api-access-zh284\") pod \"placement-db-create-49tzp\" (UID: \"a292c622-0bd6-436d-95b4-8ca5e643fe10\") " pod="openstack/placement-db-create-49tzp" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.829769 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-49tzp" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.839986 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-b00f-account-create-update-gvhl8" Dec 10 11:06:13 crc kubenswrapper[4682]: I1210 11:06:13.958364 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-jrfpg"] Dec 10 11:06:14 crc kubenswrapper[4682]: W1210 11:06:14.057266 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10c2d035_3ea8_46a7_9380_0bbe5d729bfe.slice/crio-abe18d7408d4767cca5df379218fb75ec3d1ebc1b42a571386a0a0e6acef8194 WatchSource:0}: Error finding container abe18d7408d4767cca5df379218fb75ec3d1ebc1b42a571386a0a0e6acef8194: Status 404 returned error can't find the container with id abe18d7408d4767cca5df379218fb75ec3d1ebc1b42a571386a0a0e6acef8194 Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.059553 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-d60a-account-create-update-cf5pj"] Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.340888 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-b00f-account-create-update-gvhl8"] Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.363120 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d60a-account-create-update-cf5pj" event={"ID":"10c2d035-3ea8-46a7-9380-0bbe5d729bfe","Type":"ContainerStarted","Data":"7ffd089fffcbb868d1c5881c31e2ad6b83a45058cdc7a29a52aa910fc962f198"} Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.363158 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d60a-account-create-update-cf5pj" event={"ID":"10c2d035-3ea8-46a7-9380-0bbe5d729bfe","Type":"ContainerStarted","Data":"abe18d7408d4767cca5df379218fb75ec3d1ebc1b42a571386a0a0e6acef8194"} Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.368535 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f598ce2d-df0a-4477-8c89-126cc5d3a5be","Type":"ContainerStarted","Data":"598cecfec45f8e2b0dd4fcf6ada95f80c322c1054d65ec749ce99466913610f4"} Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.371107 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-jrfpg" event={"ID":"0dfa068f-e434-4d53-97c8-44f153f4847f","Type":"ContainerStarted","Data":"68600279b2804549dc5c231bf815ff7e8fe46dd4cfd7c8000298b8e88ed54989"} Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.371164 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-jrfpg" event={"ID":"0dfa068f-e434-4d53-97c8-44f153f4847f","Type":"ContainerStarted","Data":"0da9991714a3b3996fa146894a1d029ff47d7b7f3c5cbec0a5acfbbff0329928"} Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.453899 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-d60a-account-create-update-cf5pj" podStartSLOduration=1.453877325 podStartE2EDuration="1.453877325s" podCreationTimestamp="2025-12-10 11:06:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:06:14.382576241 +0000 UTC m=+1254.702787011" watchObservedRunningTime="2025-12-10 11:06:14.453877325 +0000 UTC m=+1254.774088075" Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.463579 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-jrfpg" podStartSLOduration=1.4635631789999999 
podStartE2EDuration="1.463563179s" podCreationTimestamp="2025-12-10 11:06:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:06:14.461061071 +0000 UTC m=+1254.781271831" watchObservedRunningTime="2025-12-10 11:06:14.463563179 +0000 UTC m=+1254.783773929" Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.472307 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d67c4ad0-1464-4f7f-9877-8601f9b2c3b0" path="/var/lib/kubelet/pods/d67c4ad0-1464-4f7f-9877-8601f9b2c3b0/volumes" Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.492194 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-49tzp"] Dec 10 11:06:14 crc kubenswrapper[4682]: W1210 11:06:14.567263 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda292c622_0bd6_436d_95b4_8ca5e643fe10.slice/crio-b8dd4e8461059c5ae20367cca90d4130d866546c7d32fa018dc06f13a07cfdd0 WatchSource:0}: Error finding container b8dd4e8461059c5ae20367cca90d4130d866546c7d32fa018dc06f13a07cfdd0: Status 404 returned error can't find the container with id b8dd4e8461059c5ae20367cca90d4130d866546c7d32fa018dc06f13a07cfdd0 Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.848591 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.968713 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d4794f5b-e93f-4adf-93ba-09e3394e962d-scripts\") pod \"d4794f5b-e93f-4adf-93ba-09e3394e962d\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.968802 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-run\") pod \"d4794f5b-e93f-4adf-93ba-09e3394e962d\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.968820 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-log-ovn\") pod \"d4794f5b-e93f-4adf-93ba-09e3394e962d\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.968873 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-run-ovn\") pod \"d4794f5b-e93f-4adf-93ba-09e3394e962d\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.968921 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-run" (OuterVolumeSpecName: "var-run") pod "d4794f5b-e93f-4adf-93ba-09e3394e962d" (UID: "d4794f5b-e93f-4adf-93ba-09e3394e962d"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.968943 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "d4794f5b-e93f-4adf-93ba-09e3394e962d" (UID: "d4794f5b-e93f-4adf-93ba-09e3394e962d"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.968997 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "d4794f5b-e93f-4adf-93ba-09e3394e962d" (UID: "d4794f5b-e93f-4adf-93ba-09e3394e962d"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.969023 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzhg8\" (UniqueName: \"kubernetes.io/projected/d4794f5b-e93f-4adf-93ba-09e3394e962d-kube-api-access-dzhg8\") pod \"d4794f5b-e93f-4adf-93ba-09e3394e962d\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.969137 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d4794f5b-e93f-4adf-93ba-09e3394e962d-additional-scripts\") pod \"d4794f5b-e93f-4adf-93ba-09e3394e962d\" (UID: \"d4794f5b-e93f-4adf-93ba-09e3394e962d\") " Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.969926 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4794f5b-e93f-4adf-93ba-09e3394e962d-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "d4794f5b-e93f-4adf-93ba-09e3394e962d" (UID: "d4794f5b-e93f-4adf-93ba-09e3394e962d"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.970030 4682 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d4794f5b-e93f-4adf-93ba-09e3394e962d-additional-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.970056 4682 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-run\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.970066 4682 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-log-ovn\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.970076 4682 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d4794f5b-e93f-4adf-93ba-09e3394e962d-var-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.970272 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4794f5b-e93f-4adf-93ba-09e3394e962d-scripts" (OuterVolumeSpecName: "scripts") pod "d4794f5b-e93f-4adf-93ba-09e3394e962d" (UID: "d4794f5b-e93f-4adf-93ba-09e3394e962d"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:14 crc kubenswrapper[4682]: I1210 11:06:14.975779 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4794f5b-e93f-4adf-93ba-09e3394e962d-kube-api-access-dzhg8" (OuterVolumeSpecName: "kube-api-access-dzhg8") pod "d4794f5b-e93f-4adf-93ba-09e3394e962d" (UID: "d4794f5b-e93f-4adf-93ba-09e3394e962d"). InnerVolumeSpecName "kube-api-access-dzhg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.071358 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d4794f5b-e93f-4adf-93ba-09e3394e962d-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.071397 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzhg8\" (UniqueName: \"kubernetes.io/projected/d4794f5b-e93f-4adf-93ba-09e3394e962d-kube-api-access-dzhg8\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.380779 4682 generic.go:334] "Generic (PLEG): container finished" podID="10c2d035-3ea8-46a7-9380-0bbe5d729bfe" containerID="7ffd089fffcbb868d1c5881c31e2ad6b83a45058cdc7a29a52aa910fc962f198" exitCode=0 Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.380838 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d60a-account-create-update-cf5pj" event={"ID":"10c2d035-3ea8-46a7-9380-0bbe5d729bfe","Type":"ContainerDied","Data":"7ffd089fffcbb868d1c5881c31e2ad6b83a45058cdc7a29a52aa910fc962f198"} Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.382483 4682 generic.go:334] "Generic (PLEG): container finished" podID="fa69a882-93ca-452f-9be6-2efc7b53f838" containerID="56b58b48aac3c94f010eec728f4bc3e8103d8d4ccdcbc07059fd76caf5049765" exitCode=0 Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.382530 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b00f-account-create-update-gvhl8" event={"ID":"fa69a882-93ca-452f-9be6-2efc7b53f838","Type":"ContainerDied","Data":"56b58b48aac3c94f010eec728f4bc3e8103d8d4ccdcbc07059fd76caf5049765"} Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.382548 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b00f-account-create-update-gvhl8" event={"ID":"fa69a882-93ca-452f-9be6-2efc7b53f838","Type":"ContainerStarted","Data":"33e781befe5c2a977bf51d2025dfa39e146780c04bfae29f886b3655ca044f93"} Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.383695 4682 generic.go:334] "Generic (PLEG): container finished" podID="a292c622-0bd6-436d-95b4-8ca5e643fe10" containerID="af8497874b71553ed16ea052b1911ff74b1640215571ae0d6342ed48e7e50519" exitCode=0 Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.383736 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-49tzp" event={"ID":"a292c622-0bd6-436d-95b4-8ca5e643fe10","Type":"ContainerDied","Data":"af8497874b71553ed16ea052b1911ff74b1640215571ae0d6342ed48e7e50519"} Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.383752 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-49tzp" event={"ID":"a292c622-0bd6-436d-95b4-8ca5e643fe10","Type":"ContainerStarted","Data":"b8dd4e8461059c5ae20367cca90d4130d866546c7d32fa018dc06f13a07cfdd0"} Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.385517 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-w7jxw-config-fjlkf" event={"ID":"d4794f5b-e93f-4adf-93ba-09e3394e962d","Type":"ContainerDied","Data":"2ab29b1a5b03892c1731c2b5929746007c3fe5a6f7480df2c3b3455501823579"} Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.385541 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ab29b1a5b03892c1731c2b5929746007c3fe5a6f7480df2c3b3455501823579" Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.385576 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-w7jxw-config-fjlkf" Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.398807 4682 generic.go:334] "Generic (PLEG): container finished" podID="0dfa068f-e434-4d53-97c8-44f153f4847f" containerID="68600279b2804549dc5c231bf815ff7e8fe46dd4cfd7c8000298b8e88ed54989" exitCode=0 Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.398847 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-jrfpg" event={"ID":"0dfa068f-e434-4d53-97c8-44f153f4847f","Type":"ContainerDied","Data":"68600279b2804549dc5c231bf815ff7e8fe46dd4cfd7c8000298b8e88ed54989"} Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.432482 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-w7jxw-config-fjlkf"] Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.445330 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-w7jxw-config-fjlkf"] Dec 10 11:06:15 crc kubenswrapper[4682]: I1210 11:06:15.543064 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-w7jxw" Dec 10 11:06:16 crc kubenswrapper[4682]: I1210 11:06:16.392827 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4794f5b-e93f-4adf-93ba-09e3394e962d" path="/var/lib/kubelet/pods/d4794f5b-e93f-4adf-93ba-09e3394e962d/volumes" Dec 10 11:06:16 crc kubenswrapper[4682]: I1210 11:06:16.414805 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f598ce2d-df0a-4477-8c89-126cc5d3a5be","Type":"ContainerStarted","Data":"38fb7c28e67a60b1d358dd5e1a01c800dc89be5b760644eb5c337c4eace4bf09"} Dec 10 11:06:16 crc kubenswrapper[4682]: I1210 11:06:16.862294 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-49tzp" Dec 10 11:06:16 crc kubenswrapper[4682]: I1210 11:06:16.974215 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a292c622-0bd6-436d-95b4-8ca5e643fe10-operator-scripts\") pod \"a292c622-0bd6-436d-95b4-8ca5e643fe10\" (UID: \"a292c622-0bd6-436d-95b4-8ca5e643fe10\") " Dec 10 11:06:16 crc kubenswrapper[4682]: I1210 11:06:16.974408 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zh284\" (UniqueName: \"kubernetes.io/projected/a292c622-0bd6-436d-95b4-8ca5e643fe10-kube-api-access-zh284\") pod \"a292c622-0bd6-436d-95b4-8ca5e643fe10\" (UID: \"a292c622-0bd6-436d-95b4-8ca5e643fe10\") " Dec 10 11:06:16 crc kubenswrapper[4682]: I1210 11:06:16.975104 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a292c622-0bd6-436d-95b4-8ca5e643fe10-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a292c622-0bd6-436d-95b4-8ca5e643fe10" (UID: "a292c622-0bd6-436d-95b4-8ca5e643fe10"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:16 crc kubenswrapper[4682]: I1210 11:06:16.980075 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a292c622-0bd6-436d-95b4-8ca5e643fe10-kube-api-access-zh284" (OuterVolumeSpecName: "kube-api-access-zh284") pod "a292c622-0bd6-436d-95b4-8ca5e643fe10" (UID: "a292c622-0bd6-436d-95b4-8ca5e643fe10"). InnerVolumeSpecName "kube-api-access-zh284". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.035557 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-d60a-account-create-update-cf5pj" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.041220 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-b00f-account-create-update-gvhl8" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.058543 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-jrfpg" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.076275 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zh284\" (UniqueName: \"kubernetes.io/projected/a292c622-0bd6-436d-95b4-8ca5e643fe10-kube-api-access-zh284\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.076316 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a292c622-0bd6-436d-95b4-8ca5e643fe10-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.177712 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzgxq\" (UniqueName: \"kubernetes.io/projected/fa69a882-93ca-452f-9be6-2efc7b53f838-kube-api-access-rzgxq\") pod \"fa69a882-93ca-452f-9be6-2efc7b53f838\" (UID: \"fa69a882-93ca-452f-9be6-2efc7b53f838\") " Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.177812 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0dfa068f-e434-4d53-97c8-44f153f4847f-operator-scripts\") pod \"0dfa068f-e434-4d53-97c8-44f153f4847f\" (UID: \"0dfa068f-e434-4d53-97c8-44f153f4847f\") " Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.177894 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jp9g2\" (UniqueName: \"kubernetes.io/projected/10c2d035-3ea8-46a7-9380-0bbe5d729bfe-kube-api-access-jp9g2\") pod \"10c2d035-3ea8-46a7-9380-0bbe5d729bfe\" (UID: \"10c2d035-3ea8-46a7-9380-0bbe5d729bfe\") " Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.177924 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10c2d035-3ea8-46a7-9380-0bbe5d729bfe-operator-scripts\") pod \"10c2d035-3ea8-46a7-9380-0bbe5d729bfe\" (UID: \"10c2d035-3ea8-46a7-9380-0bbe5d729bfe\") " Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.178039 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c5dvq\" (UniqueName: \"kubernetes.io/projected/0dfa068f-e434-4d53-97c8-44f153f4847f-kube-api-access-c5dvq\") pod \"0dfa068f-e434-4d53-97c8-44f153f4847f\" (UID: \"0dfa068f-e434-4d53-97c8-44f153f4847f\") " Dec 10 11:06:17 crc 
kubenswrapper[4682]: I1210 11:06:17.178068 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fa69a882-93ca-452f-9be6-2efc7b53f838-operator-scripts\") pod \"fa69a882-93ca-452f-9be6-2efc7b53f838\" (UID: \"fa69a882-93ca-452f-9be6-2efc7b53f838\") " Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.178182 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0dfa068f-e434-4d53-97c8-44f153f4847f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0dfa068f-e434-4d53-97c8-44f153f4847f" (UID: "0dfa068f-e434-4d53-97c8-44f153f4847f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.178461 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10c2d035-3ea8-46a7-9380-0bbe5d729bfe-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "10c2d035-3ea8-46a7-9380-0bbe5d729bfe" (UID: "10c2d035-3ea8-46a7-9380-0bbe5d729bfe"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.179171 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa69a882-93ca-452f-9be6-2efc7b53f838-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fa69a882-93ca-452f-9be6-2efc7b53f838" (UID: "fa69a882-93ca-452f-9be6-2efc7b53f838"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.179726 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fa69a882-93ca-452f-9be6-2efc7b53f838-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.179831 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0dfa068f-e434-4d53-97c8-44f153f4847f-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.180922 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10c2d035-3ea8-46a7-9380-0bbe5d729bfe-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.180888 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa69a882-93ca-452f-9be6-2efc7b53f838-kube-api-access-rzgxq" (OuterVolumeSpecName: "kube-api-access-rzgxq") pod "fa69a882-93ca-452f-9be6-2efc7b53f838" (UID: "fa69a882-93ca-452f-9be6-2efc7b53f838"). InnerVolumeSpecName "kube-api-access-rzgxq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.181438 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dfa068f-e434-4d53-97c8-44f153f4847f-kube-api-access-c5dvq" (OuterVolumeSpecName: "kube-api-access-c5dvq") pod "0dfa068f-e434-4d53-97c8-44f153f4847f" (UID: "0dfa068f-e434-4d53-97c8-44f153f4847f"). InnerVolumeSpecName "kube-api-access-c5dvq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.182314 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10c2d035-3ea8-46a7-9380-0bbe5d729bfe-kube-api-access-jp9g2" (OuterVolumeSpecName: "kube-api-access-jp9g2") pod "10c2d035-3ea8-46a7-9380-0bbe5d729bfe" (UID: "10c2d035-3ea8-46a7-9380-0bbe5d729bfe"). InnerVolumeSpecName "kube-api-access-jp9g2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.287212 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c5dvq\" (UniqueName: \"kubernetes.io/projected/0dfa068f-e434-4d53-97c8-44f153f4847f-kube-api-access-c5dvq\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.287251 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzgxq\" (UniqueName: \"kubernetes.io/projected/fa69a882-93ca-452f-9be6-2efc7b53f838-kube-api-access-rzgxq\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.287260 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jp9g2\" (UniqueName: \"kubernetes.io/projected/10c2d035-3ea8-46a7-9380-0bbe5d729bfe-kube-api-access-jp9g2\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.426106 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-jrfpg" event={"ID":"0dfa068f-e434-4d53-97c8-44f153f4847f","Type":"ContainerDied","Data":"0da9991714a3b3996fa146894a1d029ff47d7b7f3c5cbec0a5acfbbff0329928"} Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.426136 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-jrfpg" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.426155 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0da9991714a3b3996fa146894a1d029ff47d7b7f3c5cbec0a5acfbbff0329928" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.427804 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d60a-account-create-update-cf5pj" event={"ID":"10c2d035-3ea8-46a7-9380-0bbe5d729bfe","Type":"ContainerDied","Data":"abe18d7408d4767cca5df379218fb75ec3d1ebc1b42a571386a0a0e6acef8194"} Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.427885 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="abe18d7408d4767cca5df379218fb75ec3d1ebc1b42a571386a0a0e6acef8194" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.427855 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-d60a-account-create-update-cf5pj" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.429765 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b00f-account-create-update-gvhl8" event={"ID":"fa69a882-93ca-452f-9be6-2efc7b53f838","Type":"ContainerDied","Data":"33e781befe5c2a977bf51d2025dfa39e146780c04bfae29f886b3655ca044f93"} Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.429800 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="33e781befe5c2a977bf51d2025dfa39e146780c04bfae29f886b3655ca044f93" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.429858 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-b00f-account-create-update-gvhl8" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.439987 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-49tzp" event={"ID":"a292c622-0bd6-436d-95b4-8ca5e643fe10","Type":"ContainerDied","Data":"b8dd4e8461059c5ae20367cca90d4130d866546c7d32fa018dc06f13a07cfdd0"} Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.440242 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8dd4e8461059c5ae20367cca90d4130d866546c7d32fa018dc06f13a07cfdd0" Dec 10 11:06:17 crc kubenswrapper[4682]: I1210 11:06:17.440027 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-49tzp" Dec 10 11:06:18 crc kubenswrapper[4682]: I1210 11:06:18.483787 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cloudkitty-lokistack-ingester-0" podUID="ea1f94a0-5b00-4aac-85ae-f7af9df196b6" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 10 11:06:18 crc kubenswrapper[4682]: I1210 11:06:18.611615 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:06:18 crc kubenswrapper[4682]: I1210 11:06:18.622336 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b7a82b72-0262-4a74-becf-36ead02cb92c-etc-swift\") pod \"swift-storage-0\" (UID: \"b7a82b72-0262-4a74-becf-36ead02cb92c\") " pod="openstack/swift-storage-0" Dec 10 11:06:18 crc kubenswrapper[4682]: I1210 11:06:18.718834 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.006710 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-bxlhz"] Dec 10 11:06:19 crc kubenswrapper[4682]: E1210 11:06:19.007436 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dfa068f-e434-4d53-97c8-44f153f4847f" containerName="mariadb-database-create" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.007455 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dfa068f-e434-4d53-97c8-44f153f4847f" containerName="mariadb-database-create" Dec 10 11:06:19 crc kubenswrapper[4682]: E1210 11:06:19.007531 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10c2d035-3ea8-46a7-9380-0bbe5d729bfe" containerName="mariadb-account-create-update" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.007542 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="10c2d035-3ea8-46a7-9380-0bbe5d729bfe" containerName="mariadb-account-create-update" Dec 10 11:06:19 crc kubenswrapper[4682]: E1210 11:06:19.007561 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a292c622-0bd6-436d-95b4-8ca5e643fe10" containerName="mariadb-database-create" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.007585 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="a292c622-0bd6-436d-95b4-8ca5e643fe10" containerName="mariadb-database-create" Dec 10 11:06:19 crc kubenswrapper[4682]: E1210 11:06:19.007714 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4794f5b-e93f-4adf-93ba-09e3394e962d" containerName="ovn-config" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.007727 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4794f5b-e93f-4adf-93ba-09e3394e962d" containerName="ovn-config" Dec 10 11:06:19 crc kubenswrapper[4682]: E1210 11:06:19.007747 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa69a882-93ca-452f-9be6-2efc7b53f838" containerName="mariadb-account-create-update" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.007754 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa69a882-93ca-452f-9be6-2efc7b53f838" containerName="mariadb-account-create-update" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.007995 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa69a882-93ca-452f-9be6-2efc7b53f838" containerName="mariadb-account-create-update" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.008019 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4794f5b-e93f-4adf-93ba-09e3394e962d" containerName="ovn-config" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.008028 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dfa068f-e434-4d53-97c8-44f153f4847f" containerName="mariadb-database-create" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.008040 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="10c2d035-3ea8-46a7-9380-0bbe5d729bfe" containerName="mariadb-account-create-update" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.008050 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="a292c622-0bd6-436d-95b4-8ca5e643fe10" containerName="mariadb-database-create" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.009138 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.011677 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-qm2p5" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.011936 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.015652 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-bxlhz"] Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.122086 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4djzx\" (UniqueName: \"kubernetes.io/projected/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-kube-api-access-4djzx\") pod \"glance-db-sync-bxlhz\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.122157 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-combined-ca-bundle\") pod \"glance-db-sync-bxlhz\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.122293 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-db-sync-config-data\") pod \"glance-db-sync-bxlhz\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.122349 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-config-data\") pod \"glance-db-sync-bxlhz\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.224404 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-config-data\") pod \"glance-db-sync-bxlhz\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.224501 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4djzx\" (UniqueName: \"kubernetes.io/projected/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-kube-api-access-4djzx\") pod \"glance-db-sync-bxlhz\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.224567 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-combined-ca-bundle\") pod \"glance-db-sync-bxlhz\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.224662 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-db-sync-config-data\") pod 
\"glance-db-sync-bxlhz\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.228968 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-db-sync-config-data\") pod \"glance-db-sync-bxlhz\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.229953 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-combined-ca-bundle\") pod \"glance-db-sync-bxlhz\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.232907 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-config-data\") pod \"glance-db-sync-bxlhz\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.241278 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4djzx\" (UniqueName: \"kubernetes.io/projected/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-kube-api-access-4djzx\") pod \"glance-db-sync-bxlhz\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:19 crc kubenswrapper[4682]: I1210 11:06:19.331757 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.415738 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.437498 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.491293 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"69993c4f82d5395d22fb68222fd8b82583fa0c738afc678658acf4e95579daba"} Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.832045 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-5vf6p"] Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.833478 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-5vf6p" Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.855441 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-5vf6p"] Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.866131 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-b63b-account-create-update-7tx96"] Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.867842 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-b63b-account-create-update-7tx96" Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.871580 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.890392 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-b63b-account-create-update-7tx96"] Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.942856 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-bxmng"] Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.956907 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-bxmng" Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.960528 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-fa63-account-create-update-pb8d8"] Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.961986 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-fa63-account-create-update-pb8d8" Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.962659 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2pj2\" (UniqueName: \"kubernetes.io/projected/81a956dc-54cb-4eb1-8ac2-996a66eca415-kube-api-access-q2pj2\") pod \"cinder-db-create-5vf6p\" (UID: \"81a956dc-54cb-4eb1-8ac2-996a66eca415\") " pod="openstack/cinder-db-create-5vf6p" Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.962711 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkh9h\" (UniqueName: \"kubernetes.io/projected/241a5a1d-b18b-4151-9aa9-81d82d723700-kube-api-access-lkh9h\") pod \"barbican-b63b-account-create-update-7tx96\" (UID: \"241a5a1d-b18b-4151-9aa9-81d82d723700\") " pod="openstack/barbican-b63b-account-create-update-7tx96" Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.962736 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81a956dc-54cb-4eb1-8ac2-996a66eca415-operator-scripts\") pod \"cinder-db-create-5vf6p\" (UID: \"81a956dc-54cb-4eb1-8ac2-996a66eca415\") " pod="openstack/cinder-db-create-5vf6p" Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.962828 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/241a5a1d-b18b-4151-9aa9-81d82d723700-operator-scripts\") pod \"barbican-b63b-account-create-update-7tx96\" (UID: \"241a5a1d-b18b-4151-9aa9-81d82d723700\") " pod="openstack/barbican-b63b-account-create-update-7tx96" Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.966064 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.967520 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-bxmng"] Dec 10 11:06:20 crc kubenswrapper[4682]: I1210 11:06:20.994545 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-fa63-account-create-update-pb8d8"] Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.064411 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2pj2\" (UniqueName: 
\"kubernetes.io/projected/81a956dc-54cb-4eb1-8ac2-996a66eca415-kube-api-access-q2pj2\") pod \"cinder-db-create-5vf6p\" (UID: \"81a956dc-54cb-4eb1-8ac2-996a66eca415\") " pod="openstack/cinder-db-create-5vf6p" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.064496 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81a956dc-54cb-4eb1-8ac2-996a66eca415-operator-scripts\") pod \"cinder-db-create-5vf6p\" (UID: \"81a956dc-54cb-4eb1-8ac2-996a66eca415\") " pod="openstack/cinder-db-create-5vf6p" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.064518 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkh9h\" (UniqueName: \"kubernetes.io/projected/241a5a1d-b18b-4151-9aa9-81d82d723700-kube-api-access-lkh9h\") pod \"barbican-b63b-account-create-update-7tx96\" (UID: \"241a5a1d-b18b-4151-9aa9-81d82d723700\") " pod="openstack/barbican-b63b-account-create-update-7tx96" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.064589 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twq64\" (UniqueName: \"kubernetes.io/projected/d6473447-e97b-4ce1-bc48-0028c9ac3444-kube-api-access-twq64\") pod \"barbican-db-create-bxmng\" (UID: \"d6473447-e97b-4ce1-bc48-0028c9ac3444\") " pod="openstack/barbican-db-create-bxmng" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.064640 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6473447-e97b-4ce1-bc48-0028c9ac3444-operator-scripts\") pod \"barbican-db-create-bxmng\" (UID: \"d6473447-e97b-4ce1-bc48-0028c9ac3444\") " pod="openstack/barbican-db-create-bxmng" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.064966 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/241a5a1d-b18b-4151-9aa9-81d82d723700-operator-scripts\") pod \"barbican-b63b-account-create-update-7tx96\" (UID: \"241a5a1d-b18b-4151-9aa9-81d82d723700\") " pod="openstack/barbican-b63b-account-create-update-7tx96" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.064998 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwfqg\" (UniqueName: \"kubernetes.io/projected/86b9680d-68e4-4b89-bf5a-4925464b50ef-kube-api-access-bwfqg\") pod \"cinder-fa63-account-create-update-pb8d8\" (UID: \"86b9680d-68e4-4b89-bf5a-4925464b50ef\") " pod="openstack/cinder-fa63-account-create-update-pb8d8" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.065047 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86b9680d-68e4-4b89-bf5a-4925464b50ef-operator-scripts\") pod \"cinder-fa63-account-create-update-pb8d8\" (UID: \"86b9680d-68e4-4b89-bf5a-4925464b50ef\") " pod="openstack/cinder-fa63-account-create-update-pb8d8" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.065646 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81a956dc-54cb-4eb1-8ac2-996a66eca415-operator-scripts\") pod \"cinder-db-create-5vf6p\" (UID: \"81a956dc-54cb-4eb1-8ac2-996a66eca415\") " pod="openstack/cinder-db-create-5vf6p" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 
11:06:21.065732 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/241a5a1d-b18b-4151-9aa9-81d82d723700-operator-scripts\") pod \"barbican-b63b-account-create-update-7tx96\" (UID: \"241a5a1d-b18b-4151-9aa9-81d82d723700\") " pod="openstack/barbican-b63b-account-create-update-7tx96" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.082872 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2pj2\" (UniqueName: \"kubernetes.io/projected/81a956dc-54cb-4eb1-8ac2-996a66eca415-kube-api-access-q2pj2\") pod \"cinder-db-create-5vf6p\" (UID: \"81a956dc-54cb-4eb1-8ac2-996a66eca415\") " pod="openstack/cinder-db-create-5vf6p" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.107606 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkh9h\" (UniqueName: \"kubernetes.io/projected/241a5a1d-b18b-4151-9aa9-81d82d723700-kube-api-access-lkh9h\") pod \"barbican-b63b-account-create-update-7tx96\" (UID: \"241a5a1d-b18b-4151-9aa9-81d82d723700\") " pod="openstack/barbican-b63b-account-create-update-7tx96" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.120673 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-db-create-pglvk"] Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.122421 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-create-pglvk" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.139973 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-ms2nv"] Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.141221 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-ms2nv" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.145160 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.145411 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-44cvq" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.145622 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.145884 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.153413 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-5vf6p" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.166221 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twq64\" (UniqueName: \"kubernetes.io/projected/d6473447-e97b-4ce1-bc48-0028c9ac3444-kube-api-access-twq64\") pod \"barbican-db-create-bxmng\" (UID: \"d6473447-e97b-4ce1-bc48-0028c9ac3444\") " pod="openstack/barbican-db-create-bxmng" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.166275 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6473447-e97b-4ce1-bc48-0028c9ac3444-operator-scripts\") pod \"barbican-db-create-bxmng\" (UID: \"d6473447-e97b-4ce1-bc48-0028c9ac3444\") " pod="openstack/barbican-db-create-bxmng" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.166323 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwfqg\" (UniqueName: \"kubernetes.io/projected/86b9680d-68e4-4b89-bf5a-4925464b50ef-kube-api-access-bwfqg\") pod \"cinder-fa63-account-create-update-pb8d8\" (UID: \"86b9680d-68e4-4b89-bf5a-4925464b50ef\") " pod="openstack/cinder-fa63-account-create-update-pb8d8" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.166364 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86b9680d-68e4-4b89-bf5a-4925464b50ef-operator-scripts\") pod \"cinder-fa63-account-create-update-pb8d8\" (UID: \"86b9680d-68e4-4b89-bf5a-4925464b50ef\") " pod="openstack/cinder-fa63-account-create-update-pb8d8" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.169029 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86b9680d-68e4-4b89-bf5a-4925464b50ef-operator-scripts\") pod \"cinder-fa63-account-create-update-pb8d8\" (UID: \"86b9680d-68e4-4b89-bf5a-4925464b50ef\") " pod="openstack/cinder-fa63-account-create-update-pb8d8" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.171222 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6473447-e97b-4ce1-bc48-0028c9ac3444-operator-scripts\") pod \"barbican-db-create-bxmng\" (UID: \"d6473447-e97b-4ce1-bc48-0028c9ac3444\") " pod="openstack/barbican-db-create-bxmng" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.171306 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-ms2nv"] Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.194439 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-b63b-account-create-update-7tx96" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.197025 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwfqg\" (UniqueName: \"kubernetes.io/projected/86b9680d-68e4-4b89-bf5a-4925464b50ef-kube-api-access-bwfqg\") pod \"cinder-fa63-account-create-update-pb8d8\" (UID: \"86b9680d-68e4-4b89-bf5a-4925464b50ef\") " pod="openstack/cinder-fa63-account-create-update-pb8d8" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.197782 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twq64\" (UniqueName: \"kubernetes.io/projected/d6473447-e97b-4ce1-bc48-0028c9ac3444-kube-api-access-twq64\") pod \"barbican-db-create-bxmng\" (UID: \"d6473447-e97b-4ce1-bc48-0028c9ac3444\") " pod="openstack/barbican-db-create-bxmng" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.202819 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-db-create-pglvk"] Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.235414 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.257892 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-9b10-account-create-update-p68fr"] Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.268640 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/327df6d2-4568-45e6-a719-650e8881d7cc-config-data\") pod \"keystone-db-sync-ms2nv\" (UID: \"327df6d2-4568-45e6-a719-650e8881d7cc\") " pod="openstack/keystone-db-sync-ms2nv" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.268685 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bghr\" (UniqueName: \"kubernetes.io/projected/2e662a28-3dfe-43c7-a368-ea48cd6867a8-kube-api-access-4bghr\") pod \"cloudkitty-db-create-pglvk\" (UID: \"2e662a28-3dfe-43c7-a368-ea48cd6867a8\") " pod="openstack/cloudkitty-db-create-pglvk" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.268710 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/327df6d2-4568-45e6-a719-650e8881d7cc-combined-ca-bundle\") pod \"keystone-db-sync-ms2nv\" (UID: \"327df6d2-4568-45e6-a719-650e8881d7cc\") " pod="openstack/keystone-db-sync-ms2nv" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.268815 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e662a28-3dfe-43c7-a368-ea48cd6867a8-operator-scripts\") pod \"cloudkitty-db-create-pglvk\" (UID: \"2e662a28-3dfe-43c7-a368-ea48cd6867a8\") " pod="openstack/cloudkitty-db-create-pglvk" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.268887 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbgrk\" (UniqueName: \"kubernetes.io/projected/327df6d2-4568-45e6-a719-650e8881d7cc-kube-api-access-nbgrk\") pod \"keystone-db-sync-ms2nv\" (UID: \"327df6d2-4568-45e6-a719-650e8881d7cc\") " pod="openstack/keystone-db-sync-ms2nv" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.281147 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-9b10-account-create-update-p68fr" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.282546 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-bxmng" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.283309 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-9b10-account-create-update-p68fr"] Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.287076 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.295707 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-bxlhz"] Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.300450 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-fa63-account-create-update-pb8d8" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.372673 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-04dc-account-create-update-ddvxc"] Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.375691 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-04dc-account-create-update-ddvxc" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.378396 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-db-secret" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.387686 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e662a28-3dfe-43c7-a368-ea48cd6867a8-operator-scripts\") pod \"cloudkitty-db-create-pglvk\" (UID: \"2e662a28-3dfe-43c7-a368-ea48cd6867a8\") " pod="openstack/cloudkitty-db-create-pglvk" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.387754 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24b52051-08ad-426d-a9d4-23465f022f28-operator-scripts\") pod \"neutron-9b10-account-create-update-p68fr\" (UID: \"24b52051-08ad-426d-a9d4-23465f022f28\") " pod="openstack/neutron-9b10-account-create-update-p68fr" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.387894 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbgrk\" (UniqueName: \"kubernetes.io/projected/327df6d2-4568-45e6-a719-650e8881d7cc-kube-api-access-nbgrk\") pod \"keystone-db-sync-ms2nv\" (UID: \"327df6d2-4568-45e6-a719-650e8881d7cc\") " pod="openstack/keystone-db-sync-ms2nv" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.387938 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m927d\" (UniqueName: \"kubernetes.io/projected/24b52051-08ad-426d-a9d4-23465f022f28-kube-api-access-m927d\") pod \"neutron-9b10-account-create-update-p68fr\" (UID: \"24b52051-08ad-426d-a9d4-23465f022f28\") " pod="openstack/neutron-9b10-account-create-update-p68fr" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.388029 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/327df6d2-4568-45e6-a719-650e8881d7cc-config-data\") pod \"keystone-db-sync-ms2nv\" (UID: \"327df6d2-4568-45e6-a719-650e8881d7cc\") " pod="openstack/keystone-db-sync-ms2nv" Dec 10 11:06:21 crc 
kubenswrapper[4682]: I1210 11:06:21.388058 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bghr\" (UniqueName: \"kubernetes.io/projected/2e662a28-3dfe-43c7-a368-ea48cd6867a8-kube-api-access-4bghr\") pod \"cloudkitty-db-create-pglvk\" (UID: \"2e662a28-3dfe-43c7-a368-ea48cd6867a8\") " pod="openstack/cloudkitty-db-create-pglvk" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.388126 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/327df6d2-4568-45e6-a719-650e8881d7cc-combined-ca-bundle\") pod \"keystone-db-sync-ms2nv\" (UID: \"327df6d2-4568-45e6-a719-650e8881d7cc\") " pod="openstack/keystone-db-sync-ms2nv" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.391409 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e662a28-3dfe-43c7-a368-ea48cd6867a8-operator-scripts\") pod \"cloudkitty-db-create-pglvk\" (UID: \"2e662a28-3dfe-43c7-a368-ea48cd6867a8\") " pod="openstack/cloudkitty-db-create-pglvk" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.392316 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-54zjl"] Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.524444 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24b52051-08ad-426d-a9d4-23465f022f28-operator-scripts\") pod \"neutron-9b10-account-create-update-p68fr\" (UID: \"24b52051-08ad-426d-a9d4-23465f022f28\") " pod="openstack/neutron-9b10-account-create-update-p68fr" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.524677 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m927d\" (UniqueName: \"kubernetes.io/projected/24b52051-08ad-426d-a9d4-23465f022f28-kube-api-access-m927d\") pod \"neutron-9b10-account-create-update-p68fr\" (UID: \"24b52051-08ad-426d-a9d4-23465f022f28\") " pod="openstack/neutron-9b10-account-create-update-p68fr" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.527975 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-54zjl" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.544848 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/327df6d2-4568-45e6-a719-650e8881d7cc-config-data\") pod \"keystone-db-sync-ms2nv\" (UID: \"327df6d2-4568-45e6-a719-650e8881d7cc\") " pod="openstack/keystone-db-sync-ms2nv" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.545615 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24b52051-08ad-426d-a9d4-23465f022f28-operator-scripts\") pod \"neutron-9b10-account-create-update-p68fr\" (UID: \"24b52051-08ad-426d-a9d4-23465f022f28\") " pod="openstack/neutron-9b10-account-create-update-p68fr" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.546629 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bghr\" (UniqueName: \"kubernetes.io/projected/2e662a28-3dfe-43c7-a368-ea48cd6867a8-kube-api-access-4bghr\") pod \"cloudkitty-db-create-pglvk\" (UID: \"2e662a28-3dfe-43c7-a368-ea48cd6867a8\") " pod="openstack/cloudkitty-db-create-pglvk" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.552582 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbgrk\" (UniqueName: \"kubernetes.io/projected/327df6d2-4568-45e6-a719-650e8881d7cc-kube-api-access-nbgrk\") pod \"keystone-db-sync-ms2nv\" (UID: \"327df6d2-4568-45e6-a719-650e8881d7cc\") " pod="openstack/keystone-db-sync-ms2nv" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.552986 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-04dc-account-create-update-ddvxc"] Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.563763 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/327df6d2-4568-45e6-a719-650e8881d7cc-combined-ca-bundle\") pod \"keystone-db-sync-ms2nv\" (UID: \"327df6d2-4568-45e6-a719-650e8881d7cc\") " pod="openstack/keystone-db-sync-ms2nv" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.567287 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m927d\" (UniqueName: \"kubernetes.io/projected/24b52051-08ad-426d-a9d4-23465f022f28-kube-api-access-m927d\") pod \"neutron-9b10-account-create-update-p68fr\" (UID: \"24b52051-08ad-426d-a9d4-23465f022f28\") " pod="openstack/neutron-9b10-account-create-update-p68fr" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.672667 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bxlhz" event={"ID":"4ca63023-1a06-43a7-b9e4-1235b76b8ec8","Type":"ContainerStarted","Data":"f28a68f10a66cdcc9fe2ec9648300a850a0c1e5ca6b56f26359a796f29744abd"} Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.673322 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-9b10-account-create-update-p68fr" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.675625 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bh2th\" (UniqueName: \"kubernetes.io/projected/4e45654e-91af-4171-a0f7-e15eac1a40e9-kube-api-access-bh2th\") pod \"cloudkitty-04dc-account-create-update-ddvxc\" (UID: \"4e45654e-91af-4171-a0f7-e15eac1a40e9\") " pod="openstack/cloudkitty-04dc-account-create-update-ddvxc" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.675780 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4e45654e-91af-4171-a0f7-e15eac1a40e9-operator-scripts\") pod \"cloudkitty-04dc-account-create-update-ddvxc\" (UID: \"4e45654e-91af-4171-a0f7-e15eac1a40e9\") " pod="openstack/cloudkitty-04dc-account-create-update-ddvxc" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.696089 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-54zjl"] Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.697811 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-create-pglvk" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.699449 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-ms2nv" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.778482 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4e45654e-91af-4171-a0f7-e15eac1a40e9-operator-scripts\") pod \"cloudkitty-04dc-account-create-update-ddvxc\" (UID: \"4e45654e-91af-4171-a0f7-e15eac1a40e9\") " pod="openstack/cloudkitty-04dc-account-create-update-ddvxc" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.778866 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2t72\" (UniqueName: \"kubernetes.io/projected/433ef90f-139b-4a60-918b-ef0a226ee731-kube-api-access-h2t72\") pod \"neutron-db-create-54zjl\" (UID: \"433ef90f-139b-4a60-918b-ef0a226ee731\") " pod="openstack/neutron-db-create-54zjl" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.778935 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bh2th\" (UniqueName: \"kubernetes.io/projected/4e45654e-91af-4171-a0f7-e15eac1a40e9-kube-api-access-bh2th\") pod \"cloudkitty-04dc-account-create-update-ddvxc\" (UID: \"4e45654e-91af-4171-a0f7-e15eac1a40e9\") " pod="openstack/cloudkitty-04dc-account-create-update-ddvxc" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.778995 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/433ef90f-139b-4a60-918b-ef0a226ee731-operator-scripts\") pod \"neutron-db-create-54zjl\" (UID: \"433ef90f-139b-4a60-918b-ef0a226ee731\") " pod="openstack/neutron-db-create-54zjl" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.786301 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4e45654e-91af-4171-a0f7-e15eac1a40e9-operator-scripts\") pod \"cloudkitty-04dc-account-create-update-ddvxc\" (UID: \"4e45654e-91af-4171-a0f7-e15eac1a40e9\") " 
pod="openstack/cloudkitty-04dc-account-create-update-ddvxc" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.830855 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bh2th\" (UniqueName: \"kubernetes.io/projected/4e45654e-91af-4171-a0f7-e15eac1a40e9-kube-api-access-bh2th\") pod \"cloudkitty-04dc-account-create-update-ddvxc\" (UID: \"4e45654e-91af-4171-a0f7-e15eac1a40e9\") " pod="openstack/cloudkitty-04dc-account-create-update-ddvxc" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.882286 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2t72\" (UniqueName: \"kubernetes.io/projected/433ef90f-139b-4a60-918b-ef0a226ee731-kube-api-access-h2t72\") pod \"neutron-db-create-54zjl\" (UID: \"433ef90f-139b-4a60-918b-ef0a226ee731\") " pod="openstack/neutron-db-create-54zjl" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.882488 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/433ef90f-139b-4a60-918b-ef0a226ee731-operator-scripts\") pod \"neutron-db-create-54zjl\" (UID: \"433ef90f-139b-4a60-918b-ef0a226ee731\") " pod="openstack/neutron-db-create-54zjl" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.885838 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/433ef90f-139b-4a60-918b-ef0a226ee731-operator-scripts\") pod \"neutron-db-create-54zjl\" (UID: \"433ef90f-139b-4a60-918b-ef0a226ee731\") " pod="openstack/neutron-db-create-54zjl" Dec 10 11:06:21 crc kubenswrapper[4682]: I1210 11:06:21.932171 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2t72\" (UniqueName: \"kubernetes.io/projected/433ef90f-139b-4a60-918b-ef0a226ee731-kube-api-access-h2t72\") pod \"neutron-db-create-54zjl\" (UID: \"433ef90f-139b-4a60-918b-ef0a226ee731\") " pod="openstack/neutron-db-create-54zjl" Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.022366 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-54zjl" Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.027033 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-04dc-account-create-update-ddvxc" Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.335410 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-5vf6p"] Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.365096 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-fa63-account-create-update-pb8d8"] Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.576054 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-b63b-account-create-update-7tx96"] Dec 10 11:06:22 crc kubenswrapper[4682]: W1210 11:06:22.601069 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod241a5a1d_b18b_4151_9aa9_81d82d723700.slice/crio-cfc22a1ceac78067b1c2d834fbb76a61613a0000f18d1411634c96d6dc89dcca WatchSource:0}: Error finding container cfc22a1ceac78067b1c2d834fbb76a61613a0000f18d1411634c96d6dc89dcca: Status 404 returned error can't find the container with id cfc22a1ceac78067b1c2d834fbb76a61613a0000f18d1411634c96d6dc89dcca Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.655176 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-bxmng"] Dec 10 11:06:22 crc kubenswrapper[4682]: W1210 11:06:22.667782 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6473447_e97b_4ce1_bc48_0028c9ac3444.slice/crio-a9e6718d825285661c741894a0cd56c03091732025c261ef1299a9414ae5c79a WatchSource:0}: Error finding container a9e6718d825285661c741894a0cd56c03091732025c261ef1299a9414ae5c79a: Status 404 returned error can't find the container with id a9e6718d825285661c741894a0cd56c03091732025c261ef1299a9414ae5c79a Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.677600 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-ms2nv"] Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.697002 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-9b10-account-create-update-p68fr"] Dec 10 11:06:22 crc kubenswrapper[4682]: W1210 11:06:22.697708 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod327df6d2_4568_45e6_a719_650e8881d7cc.slice/crio-f4cb21cde031bde310324bb141a1336b2bc1de82fef1f9d5efb64d0ed2db5a82 WatchSource:0}: Error finding container f4cb21cde031bde310324bb141a1336b2bc1de82fef1f9d5efb64d0ed2db5a82: Status 404 returned error can't find the container with id f4cb21cde031bde310324bb141a1336b2bc1de82fef1f9d5efb64d0ed2db5a82 Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.700037 4682 generic.go:334] "Generic (PLEG): container finished" podID="f598ce2d-df0a-4477-8c89-126cc5d3a5be" containerID="38fb7c28e67a60b1d358dd5e1a01c800dc89be5b760644eb5c337c4eace4bf09" exitCode=0 Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.700966 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f598ce2d-df0a-4477-8c89-126cc5d3a5be","Type":"ContainerDied","Data":"38fb7c28e67a60b1d358dd5e1a01c800dc89be5b760644eb5c337c4eace4bf09"} Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.709774 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-5vf6p" 
event={"ID":"81a956dc-54cb-4eb1-8ac2-996a66eca415","Type":"ContainerStarted","Data":"af15f42ffdc85f244dd1c0b3caad0f76ace84a34950759327072cf41db4f7c83"} Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.724067 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b63b-account-create-update-7tx96" event={"ID":"241a5a1d-b18b-4151-9aa9-81d82d723700","Type":"ContainerStarted","Data":"cfc22a1ceac78067b1c2d834fbb76a61613a0000f18d1411634c96d6dc89dcca"} Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.725214 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-bxmng" event={"ID":"d6473447-e97b-4ce1-bc48-0028c9ac3444","Type":"ContainerStarted","Data":"a9e6718d825285661c741894a0cd56c03091732025c261ef1299a9414ae5c79a"} Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.745152 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fa63-account-create-update-pb8d8" event={"ID":"86b9680d-68e4-4b89-bf5a-4925464b50ef","Type":"ContainerStarted","Data":"453101ecd81c64af45fd9e9b62fe4f1a9c5ea96cd4030eeec98410d4c91faa38"} Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.774224 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-fa63-account-create-update-pb8d8" podStartSLOduration=2.774202473 podStartE2EDuration="2.774202473s" podCreationTimestamp="2025-12-10 11:06:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:06:22.768730822 +0000 UTC m=+1263.088941572" watchObservedRunningTime="2025-12-10 11:06:22.774202473 +0000 UTC m=+1263.094413223" Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.814506 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-db-create-pglvk"] Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.921345 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-54zjl"] Dec 10 11:06:22 crc kubenswrapper[4682]: I1210 11:06:22.946556 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-04dc-account-create-update-ddvxc"] Dec 10 11:06:23 crc kubenswrapper[4682]: W1210 11:06:23.262314 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4e45654e_91af_4171_a0f7_e15eac1a40e9.slice/crio-b242e80aa08583af3662116253018b1953a50f89fa263e4ce53a0235db7b1c30 WatchSource:0}: Error finding container b242e80aa08583af3662116253018b1953a50f89fa263e4ce53a0235db7b1c30: Status 404 returned error can't find the container with id b242e80aa08583af3662116253018b1953a50f89fa263e4ce53a0235db7b1c30 Dec 10 11:06:23 crc kubenswrapper[4682]: W1210 11:06:23.265064 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e662a28_3dfe_43c7_a368_ea48cd6867a8.slice/crio-54f148ce6ffe35b288671dcaaa122ff0620fd589f14a3d9222a9f1e8f9f58420 WatchSource:0}: Error finding container 54f148ce6ffe35b288671dcaaa122ff0620fd589f14a3d9222a9f1e8f9f58420: Status 404 returned error can't find the container with id 54f148ce6ffe35b288671dcaaa122ff0620fd589f14a3d9222a9f1e8f9f58420 Dec 10 11:06:23 crc kubenswrapper[4682]: I1210 11:06:23.758688 4682 generic.go:334] "Generic (PLEG): container finished" podID="81a956dc-54cb-4eb1-8ac2-996a66eca415" containerID="5b338ebd6437cc21ebc24eb3f749ae52269999a44ad36854df37c8f13d8ef6a0" exitCode=0 Dec 10 11:06:23 
crc kubenswrapper[4682]: I1210 11:06:23.758752 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-5vf6p" event={"ID":"81a956dc-54cb-4eb1-8ac2-996a66eca415","Type":"ContainerDied","Data":"5b338ebd6437cc21ebc24eb3f749ae52269999a44ad36854df37c8f13d8ef6a0"} Dec 10 11:06:23 crc kubenswrapper[4682]: I1210 11:06:23.762336 4682 generic.go:334] "Generic (PLEG): container finished" podID="241a5a1d-b18b-4151-9aa9-81d82d723700" containerID="e89792f03ca32052e3cc2473918637ed39251ba9ca9da8d48a2cf33e117c3ce7" exitCode=0 Dec 10 11:06:23 crc kubenswrapper[4682]: I1210 11:06:23.762430 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b63b-account-create-update-7tx96" event={"ID":"241a5a1d-b18b-4151-9aa9-81d82d723700","Type":"ContainerDied","Data":"e89792f03ca32052e3cc2473918637ed39251ba9ca9da8d48a2cf33e117c3ce7"} Dec 10 11:06:23 crc kubenswrapper[4682]: I1210 11:06:23.765175 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-54zjl" event={"ID":"433ef90f-139b-4a60-918b-ef0a226ee731","Type":"ContainerStarted","Data":"3e5591bd3b71654cfefbbb9be455974e21b9c4a97b971b3565541f344f276555"} Dec 10 11:06:23 crc kubenswrapper[4682]: I1210 11:06:23.768899 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-04dc-account-create-update-ddvxc" event={"ID":"4e45654e-91af-4171-a0f7-e15eac1a40e9","Type":"ContainerStarted","Data":"b242e80aa08583af3662116253018b1953a50f89fa263e4ce53a0235db7b1c30"} Dec 10 11:06:23 crc kubenswrapper[4682]: I1210 11:06:23.774916 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-ms2nv" event={"ID":"327df6d2-4568-45e6-a719-650e8881d7cc","Type":"ContainerStarted","Data":"f4cb21cde031bde310324bb141a1336b2bc1de82fef1f9d5efb64d0ed2db5a82"} Dec 10 11:06:23 crc kubenswrapper[4682]: I1210 11:06:23.778226 4682 generic.go:334] "Generic (PLEG): container finished" podID="86b9680d-68e4-4b89-bf5a-4925464b50ef" containerID="b228321dc617cce4be5adeb972528a59d65c553ab5ee47201cc1d98e5861fd6a" exitCode=0 Dec 10 11:06:23 crc kubenswrapper[4682]: I1210 11:06:23.778302 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fa63-account-create-update-pb8d8" event={"ID":"86b9680d-68e4-4b89-bf5a-4925464b50ef","Type":"ContainerDied","Data":"b228321dc617cce4be5adeb972528a59d65c553ab5ee47201cc1d98e5861fd6a"} Dec 10 11:06:23 crc kubenswrapper[4682]: I1210 11:06:23.780585 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-create-pglvk" event={"ID":"2e662a28-3dfe-43c7-a368-ea48cd6867a8","Type":"ContainerStarted","Data":"54f148ce6ffe35b288671dcaaa122ff0620fd589f14a3d9222a9f1e8f9f58420"} Dec 10 11:06:23 crc kubenswrapper[4682]: I1210 11:06:23.783120 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-9b10-account-create-update-p68fr" event={"ID":"24b52051-08ad-426d-a9d4-23465f022f28","Type":"ContainerStarted","Data":"02155f9f4f62076a64128c363257e7e77025d845561731f59503901ea0352616"} Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.803315 4682 generic.go:334] "Generic (PLEG): container finished" podID="2e662a28-3dfe-43c7-a368-ea48cd6867a8" containerID="b0ac107c437020dc84de66755d26373d3720e923f47e2889728f6796ef1812bd" exitCode=0 Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.803546 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-create-pglvk" 
event={"ID":"2e662a28-3dfe-43c7-a368-ea48cd6867a8","Type":"ContainerDied","Data":"b0ac107c437020dc84de66755d26373d3720e923f47e2889728f6796ef1812bd"} Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.812293 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"4e235769d994eb2f0b55ffcf3dd4ca948657c7114b7089a52e0d0f1833228466"} Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.812343 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"782bb43cf19f27b336ff329a75a141b7ee8d48a212928aad50d986be2c45c050"} Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.812354 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"6aa0125b96dad86f2cd59127a5a227795a552686f5fd7630966df886c84de4cf"} Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.812364 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"37eb12119a71bc0b07ce446516dbef5a24b1542948248634c1396e61b59bd7da"} Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.820612 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f598ce2d-df0a-4477-8c89-126cc5d3a5be","Type":"ContainerStarted","Data":"2345aa35bb36cc967c196fffa1837a2b3cf5e7907624d4ecec141636d8203a0e"} Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.824276 4682 generic.go:334] "Generic (PLEG): container finished" podID="433ef90f-139b-4a60-918b-ef0a226ee731" containerID="41aa57bd613f104130b3ebf0d4c9dff97a5c58ee309d7e345ef165f15231db4b" exitCode=0 Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.824423 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-54zjl" event={"ID":"433ef90f-139b-4a60-918b-ef0a226ee731","Type":"ContainerDied","Data":"41aa57bd613f104130b3ebf0d4c9dff97a5c58ee309d7e345ef165f15231db4b"} Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.826881 4682 generic.go:334] "Generic (PLEG): container finished" podID="d6473447-e97b-4ce1-bc48-0028c9ac3444" containerID="75a175c83f62d943fb078473583e8fe4405c08b5eb3594c17433a8ccba984aa2" exitCode=0 Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.826946 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-bxmng" event={"ID":"d6473447-e97b-4ce1-bc48-0028c9ac3444","Type":"ContainerDied","Data":"75a175c83f62d943fb078473583e8fe4405c08b5eb3594c17433a8ccba984aa2"} Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.831456 4682 generic.go:334] "Generic (PLEG): container finished" podID="4e45654e-91af-4171-a0f7-e15eac1a40e9" containerID="80c7e389620ac0aba07b3ce5cedc34d39ecc3e82d31c2ce1098a9e017f4aa39c" exitCode=0 Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.831532 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-04dc-account-create-update-ddvxc" event={"ID":"4e45654e-91af-4171-a0f7-e15eac1a40e9","Type":"ContainerDied","Data":"80c7e389620ac0aba07b3ce5cedc34d39ecc3e82d31c2ce1098a9e017f4aa39c"} Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.847637 4682 generic.go:334] "Generic (PLEG): container finished" podID="24b52051-08ad-426d-a9d4-23465f022f28" 
containerID="539b09b199ebb47b12ad99cc7094997abb38ce6bb0a7f6702141dccbcdb76a57" exitCode=0 Dec 10 11:06:24 crc kubenswrapper[4682]: I1210 11:06:24.847733 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-9b10-account-create-update-p68fr" event={"ID":"24b52051-08ad-426d-a9d4-23465f022f28","Type":"ContainerDied","Data":"539b09b199ebb47b12ad99cc7094997abb38ce6bb0a7f6702141dccbcdb76a57"} Dec 10 11:06:25 crc kubenswrapper[4682]: I1210 11:06:25.312376 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-5vf6p" Dec 10 11:06:25 crc kubenswrapper[4682]: I1210 11:06:25.356654 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2pj2\" (UniqueName: \"kubernetes.io/projected/81a956dc-54cb-4eb1-8ac2-996a66eca415-kube-api-access-q2pj2\") pod \"81a956dc-54cb-4eb1-8ac2-996a66eca415\" (UID: \"81a956dc-54cb-4eb1-8ac2-996a66eca415\") " Dec 10 11:06:25 crc kubenswrapper[4682]: I1210 11:06:25.356706 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81a956dc-54cb-4eb1-8ac2-996a66eca415-operator-scripts\") pod \"81a956dc-54cb-4eb1-8ac2-996a66eca415\" (UID: \"81a956dc-54cb-4eb1-8ac2-996a66eca415\") " Dec 10 11:06:25 crc kubenswrapper[4682]: I1210 11:06:25.357533 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81a956dc-54cb-4eb1-8ac2-996a66eca415-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "81a956dc-54cb-4eb1-8ac2-996a66eca415" (UID: "81a956dc-54cb-4eb1-8ac2-996a66eca415"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:25 crc kubenswrapper[4682]: I1210 11:06:25.362661 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81a956dc-54cb-4eb1-8ac2-996a66eca415-kube-api-access-q2pj2" (OuterVolumeSpecName: "kube-api-access-q2pj2") pod "81a956dc-54cb-4eb1-8ac2-996a66eca415" (UID: "81a956dc-54cb-4eb1-8ac2-996a66eca415"). InnerVolumeSpecName "kube-api-access-q2pj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:25 crc kubenswrapper[4682]: I1210 11:06:25.459656 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2pj2\" (UniqueName: \"kubernetes.io/projected/81a956dc-54cb-4eb1-8ac2-996a66eca415-kube-api-access-q2pj2\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:25 crc kubenswrapper[4682]: I1210 11:06:25.459696 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81a956dc-54cb-4eb1-8ac2-996a66eca415-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:25 crc kubenswrapper[4682]: I1210 11:06:25.857199 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fa63-account-create-update-pb8d8" event={"ID":"86b9680d-68e4-4b89-bf5a-4925464b50ef","Type":"ContainerDied","Data":"453101ecd81c64af45fd9e9b62fe4f1a9c5ea96cd4030eeec98410d4c91faa38"} Dec 10 11:06:25 crc kubenswrapper[4682]: I1210 11:06:25.857235 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="453101ecd81c64af45fd9e9b62fe4f1a9c5ea96cd4030eeec98410d4c91faa38" Dec 10 11:06:25 crc kubenswrapper[4682]: I1210 11:06:25.858510 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-5vf6p" Dec 10 11:06:25 crc kubenswrapper[4682]: I1210 11:06:25.858524 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-5vf6p" event={"ID":"81a956dc-54cb-4eb1-8ac2-996a66eca415","Type":"ContainerDied","Data":"af15f42ffdc85f244dd1c0b3caad0f76ace84a34950759327072cf41db4f7c83"} Dec 10 11:06:25 crc kubenswrapper[4682]: I1210 11:06:25.858576 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="af15f42ffdc85f244dd1c0b3caad0f76ace84a34950759327072cf41db4f7c83" Dec 10 11:06:25 crc kubenswrapper[4682]: I1210 11:06:25.860122 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b63b-account-create-update-7tx96" event={"ID":"241a5a1d-b18b-4151-9aa9-81d82d723700","Type":"ContainerDied","Data":"cfc22a1ceac78067b1c2d834fbb76a61613a0000f18d1411634c96d6dc89dcca"} Dec 10 11:06:25 crc kubenswrapper[4682]: I1210 11:06:25.860177 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cfc22a1ceac78067b1c2d834fbb76a61613a0000f18d1411634c96d6dc89dcca" Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:25.999963 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-fa63-account-create-update-pb8d8" Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.025856 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-b63b-account-create-update-7tx96" Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.170401 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkh9h\" (UniqueName: \"kubernetes.io/projected/241a5a1d-b18b-4151-9aa9-81d82d723700-kube-api-access-lkh9h\") pod \"241a5a1d-b18b-4151-9aa9-81d82d723700\" (UID: \"241a5a1d-b18b-4151-9aa9-81d82d723700\") " Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.170443 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86b9680d-68e4-4b89-bf5a-4925464b50ef-operator-scripts\") pod \"86b9680d-68e4-4b89-bf5a-4925464b50ef\" (UID: \"86b9680d-68e4-4b89-bf5a-4925464b50ef\") " Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.170582 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/241a5a1d-b18b-4151-9aa9-81d82d723700-operator-scripts\") pod \"241a5a1d-b18b-4151-9aa9-81d82d723700\" (UID: \"241a5a1d-b18b-4151-9aa9-81d82d723700\") " Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.170641 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwfqg\" (UniqueName: \"kubernetes.io/projected/86b9680d-68e4-4b89-bf5a-4925464b50ef-kube-api-access-bwfqg\") pod \"86b9680d-68e4-4b89-bf5a-4925464b50ef\" (UID: \"86b9680d-68e4-4b89-bf5a-4925464b50ef\") " Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.171174 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86b9680d-68e4-4b89-bf5a-4925464b50ef-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "86b9680d-68e4-4b89-bf5a-4925464b50ef" (UID: "86b9680d-68e4-4b89-bf5a-4925464b50ef"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.171270 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/241a5a1d-b18b-4151-9aa9-81d82d723700-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "241a5a1d-b18b-4151-9aa9-81d82d723700" (UID: "241a5a1d-b18b-4151-9aa9-81d82d723700"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.190097 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/241a5a1d-b18b-4151-9aa9-81d82d723700-kube-api-access-lkh9h" (OuterVolumeSpecName: "kube-api-access-lkh9h") pod "241a5a1d-b18b-4151-9aa9-81d82d723700" (UID: "241a5a1d-b18b-4151-9aa9-81d82d723700"). InnerVolumeSpecName "kube-api-access-lkh9h". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.272809 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/241a5a1d-b18b-4151-9aa9-81d82d723700-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.272846 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkh9h\" (UniqueName: \"kubernetes.io/projected/241a5a1d-b18b-4151-9aa9-81d82d723700-kube-api-access-lkh9h\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.272860 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86b9680d-68e4-4b89-bf5a-4925464b50ef-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.311289 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86b9680d-68e4-4b89-bf5a-4925464b50ef-kube-api-access-bwfqg" (OuterVolumeSpecName: "kube-api-access-bwfqg") pod "86b9680d-68e4-4b89-bf5a-4925464b50ef" (UID: "86b9680d-68e4-4b89-bf5a-4925464b50ef"). InnerVolumeSpecName "kube-api-access-bwfqg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.377986 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwfqg\" (UniqueName: \"kubernetes.io/projected/86b9680d-68e4-4b89-bf5a-4925464b50ef-kube-api-access-bwfqg\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.900734 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-b63b-account-create-update-7tx96" Dec 10 11:06:26 crc kubenswrapper[4682]: I1210 11:06:26.900954 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-fa63-account-create-update-pb8d8" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.480970 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-9b10-account-create-update-p68fr" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.490142 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-bxmng" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.503100 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m927d\" (UniqueName: \"kubernetes.io/projected/24b52051-08ad-426d-a9d4-23465f022f28-kube-api-access-m927d\") pod \"24b52051-08ad-426d-a9d4-23465f022f28\" (UID: \"24b52051-08ad-426d-a9d4-23465f022f28\") " Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.503210 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twq64\" (UniqueName: \"kubernetes.io/projected/d6473447-e97b-4ce1-bc48-0028c9ac3444-kube-api-access-twq64\") pod \"d6473447-e97b-4ce1-bc48-0028c9ac3444\" (UID: \"d6473447-e97b-4ce1-bc48-0028c9ac3444\") " Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.504385 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6473447-e97b-4ce1-bc48-0028c9ac3444-operator-scripts\") pod \"d6473447-e97b-4ce1-bc48-0028c9ac3444\" (UID: \"d6473447-e97b-4ce1-bc48-0028c9ac3444\") " Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.504561 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24b52051-08ad-426d-a9d4-23465f022f28-operator-scripts\") pod \"24b52051-08ad-426d-a9d4-23465f022f28\" (UID: \"24b52051-08ad-426d-a9d4-23465f022f28\") " Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.505061 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d6473447-e97b-4ce1-bc48-0028c9ac3444-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d6473447-e97b-4ce1-bc48-0028c9ac3444" (UID: "d6473447-e97b-4ce1-bc48-0028c9ac3444"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.505272 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24b52051-08ad-426d-a9d4-23465f022f28-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "24b52051-08ad-426d-a9d4-23465f022f28" (UID: "24b52051-08ad-426d-a9d4-23465f022f28"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.505706 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d6473447-e97b-4ce1-bc48-0028c9ac3444-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.505739 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24b52051-08ad-426d-a9d4-23465f022f28-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.506263 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-create-pglvk" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.511455 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24b52051-08ad-426d-a9d4-23465f022f28-kube-api-access-m927d" (OuterVolumeSpecName: "kube-api-access-m927d") pod "24b52051-08ad-426d-a9d4-23465f022f28" (UID: "24b52051-08ad-426d-a9d4-23465f022f28"). InnerVolumeSpecName "kube-api-access-m927d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.514085 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6473447-e97b-4ce1-bc48-0028c9ac3444-kube-api-access-twq64" (OuterVolumeSpecName: "kube-api-access-twq64") pod "d6473447-e97b-4ce1-bc48-0028c9ac3444" (UID: "d6473447-e97b-4ce1-bc48-0028c9ac3444"). InnerVolumeSpecName "kube-api-access-twq64". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.606192 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bghr\" (UniqueName: \"kubernetes.io/projected/2e662a28-3dfe-43c7-a368-ea48cd6867a8-kube-api-access-4bghr\") pod \"2e662a28-3dfe-43c7-a368-ea48cd6867a8\" (UID: \"2e662a28-3dfe-43c7-a368-ea48cd6867a8\") " Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.606269 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e662a28-3dfe-43c7-a368-ea48cd6867a8-operator-scripts\") pod \"2e662a28-3dfe-43c7-a368-ea48cd6867a8\" (UID: \"2e662a28-3dfe-43c7-a368-ea48cd6867a8\") " Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.606628 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m927d\" (UniqueName: \"kubernetes.io/projected/24b52051-08ad-426d-a9d4-23465f022f28-kube-api-access-m927d\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.606645 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-twq64\" (UniqueName: \"kubernetes.io/projected/d6473447-e97b-4ce1-bc48-0028c9ac3444-kube-api-access-twq64\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.607119 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e662a28-3dfe-43c7-a368-ea48cd6867a8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2e662a28-3dfe-43c7-a368-ea48cd6867a8" (UID: "2e662a28-3dfe-43c7-a368-ea48cd6867a8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.609191 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e662a28-3dfe-43c7-a368-ea48cd6867a8-kube-api-access-4bghr" (OuterVolumeSpecName: "kube-api-access-4bghr") pod "2e662a28-3dfe-43c7-a368-ea48cd6867a8" (UID: "2e662a28-3dfe-43c7-a368-ea48cd6867a8"). InnerVolumeSpecName "kube-api-access-4bghr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.656938 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-54zjl" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.662342 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-04dc-account-create-update-ddvxc" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.707556 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4e45654e-91af-4171-a0f7-e15eac1a40e9-operator-scripts\") pod \"4e45654e-91af-4171-a0f7-e15eac1a40e9\" (UID: \"4e45654e-91af-4171-a0f7-e15eac1a40e9\") " Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.707624 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/433ef90f-139b-4a60-918b-ef0a226ee731-operator-scripts\") pod \"433ef90f-139b-4a60-918b-ef0a226ee731\" (UID: \"433ef90f-139b-4a60-918b-ef0a226ee731\") " Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.707676 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bh2th\" (UniqueName: \"kubernetes.io/projected/4e45654e-91af-4171-a0f7-e15eac1a40e9-kube-api-access-bh2th\") pod \"4e45654e-91af-4171-a0f7-e15eac1a40e9\" (UID: \"4e45654e-91af-4171-a0f7-e15eac1a40e9\") " Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.707741 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2t72\" (UniqueName: \"kubernetes.io/projected/433ef90f-139b-4a60-918b-ef0a226ee731-kube-api-access-h2t72\") pod \"433ef90f-139b-4a60-918b-ef0a226ee731\" (UID: \"433ef90f-139b-4a60-918b-ef0a226ee731\") " Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.708109 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bghr\" (UniqueName: \"kubernetes.io/projected/2e662a28-3dfe-43c7-a368-ea48cd6867a8-kube-api-access-4bghr\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.708132 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e662a28-3dfe-43c7-a368-ea48cd6867a8-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.708201 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e45654e-91af-4171-a0f7-e15eac1a40e9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4e45654e-91af-4171-a0f7-e15eac1a40e9" (UID: "4e45654e-91af-4171-a0f7-e15eac1a40e9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.708229 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/433ef90f-139b-4a60-918b-ef0a226ee731-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "433ef90f-139b-4a60-918b-ef0a226ee731" (UID: "433ef90f-139b-4a60-918b-ef0a226ee731"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.725486 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e45654e-91af-4171-a0f7-e15eac1a40e9-kube-api-access-bh2th" (OuterVolumeSpecName: "kube-api-access-bh2th") pod "4e45654e-91af-4171-a0f7-e15eac1a40e9" (UID: "4e45654e-91af-4171-a0f7-e15eac1a40e9"). InnerVolumeSpecName "kube-api-access-bh2th". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.725554 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/433ef90f-139b-4a60-918b-ef0a226ee731-kube-api-access-h2t72" (OuterVolumeSpecName: "kube-api-access-h2t72") pod "433ef90f-139b-4a60-918b-ef0a226ee731" (UID: "433ef90f-139b-4a60-918b-ef0a226ee731"). InnerVolumeSpecName "kube-api-access-h2t72". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.810006 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4e45654e-91af-4171-a0f7-e15eac1a40e9-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.810072 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/433ef90f-139b-4a60-918b-ef0a226ee731-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.810083 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bh2th\" (UniqueName: \"kubernetes.io/projected/4e45654e-91af-4171-a0f7-e15eac1a40e9-kube-api-access-bh2th\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.810095 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2t72\" (UniqueName: \"kubernetes.io/projected/433ef90f-139b-4a60-918b-ef0a226ee731-kube-api-access-h2t72\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.911904 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-04dc-account-create-update-ddvxc" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.911870 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-04dc-account-create-update-ddvxc" event={"ID":"4e45654e-91af-4171-a0f7-e15eac1a40e9","Type":"ContainerDied","Data":"b242e80aa08583af3662116253018b1953a50f89fa263e4ce53a0235db7b1c30"} Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.912154 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b242e80aa08583af3662116253018b1953a50f89fa263e4ce53a0235db7b1c30" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.918028 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-create-pglvk" event={"ID":"2e662a28-3dfe-43c7-a368-ea48cd6867a8","Type":"ContainerDied","Data":"54f148ce6ffe35b288671dcaaa122ff0620fd589f14a3d9222a9f1e8f9f58420"} Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.918060 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54f148ce6ffe35b288671dcaaa122ff0620fd589f14a3d9222a9f1e8f9f58420" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.918066 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-db-create-pglvk" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.921526 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-9b10-account-create-update-p68fr" event={"ID":"24b52051-08ad-426d-a9d4-23465f022f28","Type":"ContainerDied","Data":"02155f9f4f62076a64128c363257e7e77025d845561731f59503901ea0352616"} Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.921557 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02155f9f4f62076a64128c363257e7e77025d845561731f59503901ea0352616" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.921561 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-9b10-account-create-update-p68fr" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.926355 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f598ce2d-df0a-4477-8c89-126cc5d3a5be","Type":"ContainerStarted","Data":"44937547a710e113b40567756bc176ba80dee65a57853af7e3501c34de64362e"} Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.931749 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-54zjl" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.931779 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-54zjl" event={"ID":"433ef90f-139b-4a60-918b-ef0a226ee731","Type":"ContainerDied","Data":"3e5591bd3b71654cfefbbb9be455974e21b9c4a97b971b3565541f344f276555"} Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.931814 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e5591bd3b71654cfefbbb9be455974e21b9c4a97b971b3565541f344f276555" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.933562 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-bxmng" event={"ID":"d6473447-e97b-4ce1-bc48-0028c9ac3444","Type":"ContainerDied","Data":"a9e6718d825285661c741894a0cd56c03091732025c261ef1299a9414ae5c79a"} Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.933583 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9e6718d825285661c741894a0cd56c03091732025c261ef1299a9414ae5c79a" Dec 10 11:06:27 crc kubenswrapper[4682]: I1210 11:06:27.933631 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-bxmng" Dec 10 11:06:28 crc kubenswrapper[4682]: I1210 11:06:28.484718 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-lokistack-ingester-0" Dec 10 11:06:37 crc kubenswrapper[4682]: I1210 11:06:37.029598 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f598ce2d-df0a-4477-8c89-126cc5d3a5be","Type":"ContainerStarted","Data":"1d5a3bdfe960e4452ab5ea40e5b246a2137a80aed27e7c68ce186cf126c995f8"} Dec 10 11:06:37 crc kubenswrapper[4682]: I1210 11:06:37.031459 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-ms2nv" event={"ID":"327df6d2-4568-45e6-a719-650e8881d7cc","Type":"ContainerStarted","Data":"d0aabb3b100c7ef3730f7cadcb298843d3f9108639a542c744a1b631f934f1c9"} Dec 10 11:06:37 crc kubenswrapper[4682]: I1210 11:06:37.036282 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"e4749f74debf5722e51c2d18bb012439ec9f5defa328a676c09166bb1466db75"} Dec 10 11:06:37 crc kubenswrapper[4682]: I1210 11:06:37.074948 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=25.074925013 podStartE2EDuration="25.074925013s" podCreationTimestamp="2025-12-10 11:06:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:06:37.056399813 +0000 UTC m=+1277.376610583" watchObservedRunningTime="2025-12-10 11:06:37.074925013 +0000 UTC m=+1277.395135763" Dec 10 11:06:37 crc kubenswrapper[4682]: I1210 11:06:37.077161 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-ms2nv" podStartSLOduration=2.217392549 podStartE2EDuration="16.077147642s" podCreationTimestamp="2025-12-10 11:06:21 +0000 UTC" firstStartedPulling="2025-12-10 11:06:22.758707798 +0000 UTC m=+1263.078918548" lastFinishedPulling="2025-12-10 11:06:36.618462891 +0000 UTC m=+1276.938673641" observedRunningTime="2025-12-10 11:06:37.07290996 +0000 UTC m=+1277.393120720" watchObservedRunningTime="2025-12-10 11:06:37.077147642 +0000 UTC m=+1277.397358392" Dec 10 11:06:37 crc kubenswrapper[4682]: I1210 11:06:37.783519 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:38 crc kubenswrapper[4682]: I1210 11:06:38.045346 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bxlhz" event={"ID":"4ca63023-1a06-43a7-b9e4-1235b76b8ec8","Type":"ContainerStarted","Data":"20bd3b52de812a92adcb4ad276a9c0c51f28a4add7f8faba5813bee064947674"} Dec 10 11:06:38 crc kubenswrapper[4682]: I1210 11:06:38.051630 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"b156fcc1048258ab58b17d891f711cebe533bfa53e162bf9ec90b82df73d80b4"} Dec 10 11:06:38 crc kubenswrapper[4682]: I1210 11:06:38.051672 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"1c93a631f39c8081b6e9cf579fc4a25c599c0df3ae4fda613e608740fb716029"} Dec 10 11:06:38 crc kubenswrapper[4682]: I1210 11:06:38.051684 4682 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"1decfe9a8f56a80463bb917b37b53eee729a46f6febd5ca7961fed7faac89338"} Dec 10 11:06:38 crc kubenswrapper[4682]: I1210 11:06:38.076907 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-bxlhz" podStartSLOduration=4.74187214 podStartE2EDuration="20.076857726s" podCreationTimestamp="2025-12-10 11:06:18 +0000 UTC" firstStartedPulling="2025-12-10 11:06:21.309943164 +0000 UTC m=+1261.630153914" lastFinishedPulling="2025-12-10 11:06:36.64492875 +0000 UTC m=+1276.965139500" observedRunningTime="2025-12-10 11:06:38.069115824 +0000 UTC m=+1278.389326594" watchObservedRunningTime="2025-12-10 11:06:38.076857726 +0000 UTC m=+1278.397068476" Dec 10 11:06:39 crc kubenswrapper[4682]: I1210 11:06:39.065199 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"a62b892ce7ef602b9c0a982ab89370e6cd474146748c75f23b47b0225d7de871"} Dec 10 11:06:39 crc kubenswrapper[4682]: I1210 11:06:39.065529 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"88943c0d84c417ee641a6051a6c8546e28b9d4e24718d3e2de3a0c459cd70ff9"} Dec 10 11:06:39 crc kubenswrapper[4682]: I1210 11:06:39.065541 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"ab2cb16b37d40816ca1d1d48d132bc7e68f3dcc3b5e1ee13be76526ed46c61b7"} Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.091530 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"cb1cba2a78ed863607a7d3189781c9a7a4d5453e8779fa8ef0f23b418206e136"} Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.091951 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"44ee3032f7085f484e60bed676719f42b870685dc3f1bc4ccee0a0faa9e8d5e2"} Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.091967 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"78c6157efa82006a2c025a11fff957def6370d0063590705fe314b9b5754a001"} Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.091979 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b7a82b72-0262-4a74-becf-36ead02cb92c","Type":"ContainerStarted","Data":"f2c008db252106acde108351b1631f80ee9af4bad5347923edc1df494239df2d"} Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.163736 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=37.08279999 podStartE2EDuration="55.163707713s" podCreationTimestamp="2025-12-10 11:05:45 +0000 UTC" firstStartedPulling="2025-12-10 11:06:20.406952981 +0000 UTC m=+1260.727163731" lastFinishedPulling="2025-12-10 11:06:38.487860704 +0000 UTC m=+1278.808071454" observedRunningTime="2025-12-10 11:06:40.148567638 +0000 UTC m=+1280.468778418" watchObservedRunningTime="2025-12-10 11:06:40.163707713 +0000 UTC m=+1280.483918453" 
Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.423803 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-8shsw"] Dec 10 11:06:40 crc kubenswrapper[4682]: E1210 11:06:40.424230 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="433ef90f-139b-4a60-918b-ef0a226ee731" containerName="mariadb-database-create" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424254 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="433ef90f-139b-4a60-918b-ef0a226ee731" containerName="mariadb-database-create" Dec 10 11:06:40 crc kubenswrapper[4682]: E1210 11:06:40.424267 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81a956dc-54cb-4eb1-8ac2-996a66eca415" containerName="mariadb-database-create" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424275 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="81a956dc-54cb-4eb1-8ac2-996a66eca415" containerName="mariadb-database-create" Dec 10 11:06:40 crc kubenswrapper[4682]: E1210 11:06:40.424292 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86b9680d-68e4-4b89-bf5a-4925464b50ef" containerName="mariadb-account-create-update" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424301 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="86b9680d-68e4-4b89-bf5a-4925464b50ef" containerName="mariadb-account-create-update" Dec 10 11:06:40 crc kubenswrapper[4682]: E1210 11:06:40.424326 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24b52051-08ad-426d-a9d4-23465f022f28" containerName="mariadb-account-create-update" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424334 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="24b52051-08ad-426d-a9d4-23465f022f28" containerName="mariadb-account-create-update" Dec 10 11:06:40 crc kubenswrapper[4682]: E1210 11:06:40.424353 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e45654e-91af-4171-a0f7-e15eac1a40e9" containerName="mariadb-account-create-update" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424361 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e45654e-91af-4171-a0f7-e15eac1a40e9" containerName="mariadb-account-create-update" Dec 10 11:06:40 crc kubenswrapper[4682]: E1210 11:06:40.424374 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="241a5a1d-b18b-4151-9aa9-81d82d723700" containerName="mariadb-account-create-update" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424381 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="241a5a1d-b18b-4151-9aa9-81d82d723700" containerName="mariadb-account-create-update" Dec 10 11:06:40 crc kubenswrapper[4682]: E1210 11:06:40.424390 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6473447-e97b-4ce1-bc48-0028c9ac3444" containerName="mariadb-database-create" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424395 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6473447-e97b-4ce1-bc48-0028c9ac3444" containerName="mariadb-database-create" Dec 10 11:06:40 crc kubenswrapper[4682]: E1210 11:06:40.424402 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e662a28-3dfe-43c7-a368-ea48cd6867a8" containerName="mariadb-database-create" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424407 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e662a28-3dfe-43c7-a368-ea48cd6867a8" containerName="mariadb-database-create" Dec 10 11:06:40 crc kubenswrapper[4682]: 
I1210 11:06:40.424641 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="86b9680d-68e4-4b89-bf5a-4925464b50ef" containerName="mariadb-account-create-update" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424659 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e45654e-91af-4171-a0f7-e15eac1a40e9" containerName="mariadb-account-create-update" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424667 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="81a956dc-54cb-4eb1-8ac2-996a66eca415" containerName="mariadb-database-create" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424680 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="24b52051-08ad-426d-a9d4-23465f022f28" containerName="mariadb-account-create-update" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424690 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="241a5a1d-b18b-4151-9aa9-81d82d723700" containerName="mariadb-account-create-update" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424713 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="433ef90f-139b-4a60-918b-ef0a226ee731" containerName="mariadb-database-create" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424722 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e662a28-3dfe-43c7-a368-ea48cd6867a8" containerName="mariadb-database-create" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.424736 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6473447-e97b-4ce1-bc48-0028c9ac3444" containerName="mariadb-database-create" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.425896 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.431794 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.438289 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-8shsw"] Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.474213 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gr2jx\" (UniqueName: \"kubernetes.io/projected/cf92e34b-f212-4792-8712-c14f391746cf-kube-api-access-gr2jx\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.474264 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.474427 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.474585 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.474712 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-config\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.474810 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.576140 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-config\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.576215 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " 
pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.576319 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gr2jx\" (UniqueName: \"kubernetes.io/projected/cf92e34b-f212-4792-8712-c14f391746cf-kube-api-access-gr2jx\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.576354 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.576391 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.576454 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.576918 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.577444 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.577449 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.577503 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-config\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.577506 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: 
I1210 11:06:40.599180 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gr2jx\" (UniqueName: \"kubernetes.io/projected/cf92e34b-f212-4792-8712-c14f391746cf-kube-api-access-gr2jx\") pod \"dnsmasq-dns-6d5b6d6b67-8shsw\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:40 crc kubenswrapper[4682]: I1210 11:06:40.751755 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:41 crc kubenswrapper[4682]: I1210 11:06:41.220495 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-8shsw"] Dec 10 11:06:42 crc kubenswrapper[4682]: I1210 11:06:42.109317 4682 generic.go:334] "Generic (PLEG): container finished" podID="cf92e34b-f212-4792-8712-c14f391746cf" containerID="1c36072ac10408fd4e9f6061acaf9d3f1db6ce7d98ee23fe621aa293defd3c8a" exitCode=0 Dec 10 11:06:42 crc kubenswrapper[4682]: I1210 11:06:42.109403 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" event={"ID":"cf92e34b-f212-4792-8712-c14f391746cf","Type":"ContainerDied","Data":"1c36072ac10408fd4e9f6061acaf9d3f1db6ce7d98ee23fe621aa293defd3c8a"} Dec 10 11:06:42 crc kubenswrapper[4682]: I1210 11:06:42.109714 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" event={"ID":"cf92e34b-f212-4792-8712-c14f391746cf","Type":"ContainerStarted","Data":"92c9adf67ba67c07de34f9cf93c7287ccbe98cef05030c4d6b8b2a48729744b5"} Dec 10 11:06:42 crc kubenswrapper[4682]: I1210 11:06:42.112463 4682 generic.go:334] "Generic (PLEG): container finished" podID="327df6d2-4568-45e6-a719-650e8881d7cc" containerID="d0aabb3b100c7ef3730f7cadcb298843d3f9108639a542c744a1b631f934f1c9" exitCode=0 Dec 10 11:06:42 crc kubenswrapper[4682]: I1210 11:06:42.112536 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-ms2nv" event={"ID":"327df6d2-4568-45e6-a719-650e8881d7cc","Type":"ContainerDied","Data":"d0aabb3b100c7ef3730f7cadcb298843d3f9108639a542c744a1b631f934f1c9"} Dec 10 11:06:42 crc kubenswrapper[4682]: I1210 11:06:42.782750 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:42 crc kubenswrapper[4682]: I1210 11:06:42.800874 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:43 crc kubenswrapper[4682]: I1210 11:06:43.122523 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" event={"ID":"cf92e34b-f212-4792-8712-c14f391746cf","Type":"ContainerStarted","Data":"30a9469d6b8d5844e3296480c47b2ab5789dc54abe002df93d55f3ab576055a3"} Dec 10 11:06:43 crc kubenswrapper[4682]: I1210 11:06:43.127811 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Dec 10 11:06:43 crc kubenswrapper[4682]: I1210 11:06:43.146921 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" podStartSLOduration=3.146902064 podStartE2EDuration="3.146902064s" podCreationTimestamp="2025-12-10 11:06:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:06:43.1432659 +0000 UTC m=+1283.463476670" watchObservedRunningTime="2025-12-10 11:06:43.146902064 +0000 
UTC m=+1283.467112814" Dec 10 11:06:43 crc kubenswrapper[4682]: I1210 11:06:43.640407 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-ms2nv" Dec 10 11:06:43 crc kubenswrapper[4682]: I1210 11:06:43.816976 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbgrk\" (UniqueName: \"kubernetes.io/projected/327df6d2-4568-45e6-a719-650e8881d7cc-kube-api-access-nbgrk\") pod \"327df6d2-4568-45e6-a719-650e8881d7cc\" (UID: \"327df6d2-4568-45e6-a719-650e8881d7cc\") " Dec 10 11:06:43 crc kubenswrapper[4682]: I1210 11:06:43.817036 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/327df6d2-4568-45e6-a719-650e8881d7cc-config-data\") pod \"327df6d2-4568-45e6-a719-650e8881d7cc\" (UID: \"327df6d2-4568-45e6-a719-650e8881d7cc\") " Dec 10 11:06:43 crc kubenswrapper[4682]: I1210 11:06:43.817094 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/327df6d2-4568-45e6-a719-650e8881d7cc-combined-ca-bundle\") pod \"327df6d2-4568-45e6-a719-650e8881d7cc\" (UID: \"327df6d2-4568-45e6-a719-650e8881d7cc\") " Dec 10 11:06:43 crc kubenswrapper[4682]: I1210 11:06:43.822159 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/327df6d2-4568-45e6-a719-650e8881d7cc-kube-api-access-nbgrk" (OuterVolumeSpecName: "kube-api-access-nbgrk") pod "327df6d2-4568-45e6-a719-650e8881d7cc" (UID: "327df6d2-4568-45e6-a719-650e8881d7cc"). InnerVolumeSpecName "kube-api-access-nbgrk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:43 crc kubenswrapper[4682]: I1210 11:06:43.844664 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/327df6d2-4568-45e6-a719-650e8881d7cc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "327df6d2-4568-45e6-a719-650e8881d7cc" (UID: "327df6d2-4568-45e6-a719-650e8881d7cc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:43 crc kubenswrapper[4682]: I1210 11:06:43.866615 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/327df6d2-4568-45e6-a719-650e8881d7cc-config-data" (OuterVolumeSpecName: "config-data") pod "327df6d2-4568-45e6-a719-650e8881d7cc" (UID: "327df6d2-4568-45e6-a719-650e8881d7cc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:43 crc kubenswrapper[4682]: I1210 11:06:43.919587 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nbgrk\" (UniqueName: \"kubernetes.io/projected/327df6d2-4568-45e6-a719-650e8881d7cc-kube-api-access-nbgrk\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:43 crc kubenswrapper[4682]: I1210 11:06:43.919812 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/327df6d2-4568-45e6-a719-650e8881d7cc-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:43 crc kubenswrapper[4682]: I1210 11:06:43.919833 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/327df6d2-4568-45e6-a719-650e8881d7cc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.133105 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-ms2nv" event={"ID":"327df6d2-4568-45e6-a719-650e8881d7cc","Type":"ContainerDied","Data":"f4cb21cde031bde310324bb141a1336b2bc1de82fef1f9d5efb64d0ed2db5a82"} Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.133417 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4cb21cde031bde310324bb141a1336b2bc1de82fef1f9d5efb64d0ed2db5a82" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.133332 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-ms2nv" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.133612 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.451101 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-8shsw"] Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.474347 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-wrl96"] Dec 10 11:06:44 crc kubenswrapper[4682]: E1210 11:06:44.474755 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="327df6d2-4568-45e6-a719-650e8881d7cc" containerName="keystone-db-sync" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.474770 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="327df6d2-4568-45e6-a719-650e8881d7cc" containerName="keystone-db-sync" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.474956 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="327df6d2-4568-45e6-a719-650e8881d7cc" containerName="keystone-db-sync" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.475932 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.485051 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-gxgf6"] Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.486843 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.492691 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.492745 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.493061 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.493199 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.493880 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-44cvq" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.501109 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gxgf6"] Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.513234 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-wrl96"] Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.651031 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-fernet-keys\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.651073 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqg7x\" (UniqueName: \"kubernetes.io/projected/f06ea747-bea0-4ce1-8600-afc308d91c82-kube-api-access-nqg7x\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.651123 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-config-data\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.651138 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-combined-ca-bundle\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.651179 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-config\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.651203 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-scripts\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " 
pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.651262 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.651300 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-ovsdbserver-sb\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.651323 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.651349 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6v2lq\" (UniqueName: \"kubernetes.io/projected/6bb54d58-fcda-417c-953a-b198f1344c5d-kube-api-access-6v2lq\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.651373 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.651398 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-credential-keys\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.680790 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-tx82q"] Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.682918 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-tx82q" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.689009 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.689226 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.689330 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-rx4sr" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.728493 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-tx82q"] Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.744662 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-8h57v"] Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.747596 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.753273 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-fernet-keys\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.753335 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqg7x\" (UniqueName: \"kubernetes.io/projected/f06ea747-bea0-4ce1-8600-afc308d91c82-kube-api-access-nqg7x\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.753393 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-config-data\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.753411 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-combined-ca-bundle\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.753454 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-config\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.753485 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-scripts\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.753539 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.753574 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-ovsdbserver-sb\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.753595 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.753672 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6v2lq\" (UniqueName: \"kubernetes.io/projected/6bb54d58-fcda-417c-953a-b198f1344c5d-kube-api-access-6v2lq\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.753699 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.753728 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-credential-keys\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.757225 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.757965 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.764192 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-pc8qf" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.784143 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-fernet-keys\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.784286 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-config-data\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.784511 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-ovsdbserver-nb\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.785503 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-config\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.806172 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-dns-svc\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.807542 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-dns-swift-storage-0\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.819146 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-credential-keys\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.820703 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-ovsdbserver-sb\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.890082 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-db-sync-config-data\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.890219 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfssm\" (UniqueName: \"kubernetes.io/projected/b9208fd9-c069-4f27-868c-e248ef7970c0-kube-api-access-zfssm\") pod \"neutron-db-sync-tx82q\" (UID: \"b9208fd9-c069-4f27-868c-e248ef7970c0\") " pod="openstack/neutron-db-sync-tx82q" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.890302 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-scripts\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.890326 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-combined-ca-bundle\") 
pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.890536 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9208fd9-c069-4f27-868c-e248ef7970c0-combined-ca-bundle\") pod \"neutron-db-sync-tx82q\" (UID: \"b9208fd9-c069-4f27-868c-e248ef7970c0\") " pod="openstack/neutron-db-sync-tx82q" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.890584 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9gcl\" (UniqueName: \"kubernetes.io/projected/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-kube-api-access-f9gcl\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.890877 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-etc-machine-id\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.890903 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-config-data\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.890922 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b9208fd9-c069-4f27-868c-e248ef7970c0-config\") pod \"neutron-db-sync-tx82q\" (UID: \"b9208fd9-c069-4f27-868c-e248ef7970c0\") " pod="openstack/neutron-db-sync-tx82q" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.891697 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6v2lq\" (UniqueName: \"kubernetes.io/projected/6bb54d58-fcda-417c-953a-b198f1344c5d-kube-api-access-6v2lq\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.892224 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqg7x\" (UniqueName: \"kubernetes.io/projected/f06ea747-bea0-4ce1-8600-afc308d91c82-kube-api-access-nqg7x\") pod \"dnsmasq-dns-6f8c45789f-wrl96\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.895009 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-scripts\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.912995 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-combined-ca-bundle\") pod \"keystone-bootstrap-gxgf6\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " 
pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.929640 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.934008 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.946617 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.947076 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.950661 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-8h57v"] Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.963648 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-6g6l7"] Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.966263 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-6g6l7" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.969918 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.970054 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-d46pv" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.979620 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992198 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9208fd9-c069-4f27-868c-e248ef7970c0-combined-ca-bundle\") pod \"neutron-db-sync-tx82q\" (UID: \"b9208fd9-c069-4f27-868c-e248ef7970c0\") " pod="openstack/neutron-db-sync-tx82q" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992237 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9gcl\" (UniqueName: \"kubernetes.io/projected/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-kube-api-access-f9gcl\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992260 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992280 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992325 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-etc-machine-id\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " 
pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992343 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-config-data\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992358 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b9208fd9-c069-4f27-868c-e248ef7970c0-config\") pod \"neutron-db-sync-tx82q\" (UID: \"b9208fd9-c069-4f27-868c-e248ef7970c0\") " pod="openstack/neutron-db-sync-tx82q" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992385 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-db-sync-config-data\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992413 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/97c96a0f-0978-472b-b04a-6b1f0850b97c-log-httpd\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992439 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfssm\" (UniqueName: \"kubernetes.io/projected/b9208fd9-c069-4f27-868c-e248ef7970c0-kube-api-access-zfssm\") pod \"neutron-db-sync-tx82q\" (UID: \"b9208fd9-c069-4f27-868c-e248ef7970c0\") " pod="openstack/neutron-db-sync-tx82q" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992454 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-config-data\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992502 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twnj7\" (UniqueName: \"kubernetes.io/projected/97c96a0f-0978-472b-b04a-6b1f0850b97c-kube-api-access-twnj7\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992523 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-scripts\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992537 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-combined-ca-bundle\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992562 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/97c96a0f-0978-472b-b04a-6b1f0850b97c-run-httpd\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.992586 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-scripts\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.993834 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-etc-machine-id\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.996982 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-db-sync-config-data\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:44 crc kubenswrapper[4682]: I1210 11:06:44.998361 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b9208fd9-c069-4f27-868c-e248ef7970c0-config\") pod \"neutron-db-sync-tx82q\" (UID: \"b9208fd9-c069-4f27-868c-e248ef7970c0\") " pod="openstack/neutron-db-sync-tx82q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.000211 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-combined-ca-bundle\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.000660 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-scripts\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.002012 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9208fd9-c069-4f27-868c-e248ef7970c0-combined-ca-bundle\") pod \"neutron-db-sync-tx82q\" (UID: \"b9208fd9-c069-4f27-868c-e248ef7970c0\") " pod="openstack/neutron-db-sync-tx82q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.016584 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-config-data\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.018724 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-6g6l7"] Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.029253 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfssm\" (UniqueName: \"kubernetes.io/projected/b9208fd9-c069-4f27-868c-e248ef7970c0-kube-api-access-zfssm\") pod \"neutron-db-sync-tx82q\" (UID: 
\"b9208fd9-c069-4f27-868c-e248ef7970c0\") " pod="openstack/neutron-db-sync-tx82q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.039247 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9gcl\" (UniqueName: \"kubernetes.io/projected/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-kube-api-access-f9gcl\") pod \"cinder-db-sync-8h57v\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.039373 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-wrl96"] Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.040096 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.074723 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-h5fss"] Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.076220 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.080970 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-slbcz" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.081220 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.081444 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.089023 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-h5fss"] Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.094197 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.094245 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.094291 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbcqm\" (UniqueName: \"kubernetes.io/projected/c6258156-0c39-4f7b-a367-954f1eb68718-kube-api-access-cbcqm\") pod \"barbican-db-sync-6g6l7\" (UID: \"c6258156-0c39-4f7b-a367-954f1eb68718\") " pod="openstack/barbican-db-sync-6g6l7" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.094365 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/97c96a0f-0978-472b-b04a-6b1f0850b97c-log-httpd\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.094395 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-config-data\") pod 
\"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.094426 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twnj7\" (UniqueName: \"kubernetes.io/projected/97c96a0f-0978-472b-b04a-6b1f0850b97c-kube-api-access-twnj7\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.094458 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/97c96a0f-0978-472b-b04a-6b1f0850b97c-run-httpd\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.094501 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-scripts\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.094542 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c6258156-0c39-4f7b-a367-954f1eb68718-db-sync-config-data\") pod \"barbican-db-sync-6g6l7\" (UID: \"c6258156-0c39-4f7b-a367-954f1eb68718\") " pod="openstack/barbican-db-sync-6g6l7" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.094599 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6258156-0c39-4f7b-a367-954f1eb68718-combined-ca-bundle\") pod \"barbican-db-sync-6g6l7\" (UID: \"c6258156-0c39-4f7b-a367-954f1eb68718\") " pod="openstack/barbican-db-sync-6g6l7" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.095505 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/97c96a0f-0978-472b-b04a-6b1f0850b97c-run-httpd\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.095817 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/97c96a0f-0978-472b-b04a-6b1f0850b97c-log-httpd\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.101169 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-scripts\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.103867 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-st98q"] Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.105283 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.106574 4682 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.112291 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.117439 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-st98q"] Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.118154 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twnj7\" (UniqueName: \"kubernetes.io/projected/97c96a0f-0978-472b-b04a-6b1f0850b97c-kube-api-access-twnj7\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.126258 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.127652 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-config-data\") pod \"ceilometer-0\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.148619 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-8h57v" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.154596 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-db-sync-9q89f"] Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.157875 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.163057 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-scripts" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.163459 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-cloudkitty-dockercfg-6svbf" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.164646 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-config-data" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.164861 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cloudkitty-client-internal" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.167038 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-db-sync-9q89f"] Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.195914 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c6258156-0c39-4f7b-a367-954f1eb68718-db-sync-config-data\") pod \"barbican-db-sync-6g6l7\" (UID: \"c6258156-0c39-4f7b-a367-954f1eb68718\") " pod="openstack/barbican-db-sync-6g6l7" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.196106 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6258156-0c39-4f7b-a367-954f1eb68718-combined-ca-bundle\") pod \"barbican-db-sync-6g6l7\" (UID: \"c6258156-0c39-4f7b-a367-954f1eb68718\") " pod="openstack/barbican-db-sync-6g6l7" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.196129 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-config\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.196192 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62pkv\" (UniqueName: \"kubernetes.io/projected/61016d34-59f1-41cf-b7e7-0163f4507336-kube-api-access-62pkv\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.196222 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbcqm\" (UniqueName: \"kubernetes.io/projected/c6258156-0c39-4f7b-a367-954f1eb68718-kube-api-access-cbcqm\") pod \"barbican-db-sync-6g6l7\" (UID: \"c6258156-0c39-4f7b-a367-954f1eb68718\") " pod="openstack/barbican-db-sync-6g6l7" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.196250 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.196273 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.196297 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p647q\" (UniqueName: \"kubernetes.io/projected/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-kube-api-access-p647q\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.196326 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-scripts\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.196347 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.196382 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-config-data\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.196401 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-logs\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.196428 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-combined-ca-bundle\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.196460 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.204999 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6258156-0c39-4f7b-a367-954f1eb68718-combined-ca-bundle\") pod \"barbican-db-sync-6g6l7\" (UID: \"c6258156-0c39-4f7b-a367-954f1eb68718\") " pod="openstack/barbican-db-sync-6g6l7" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.205427 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/c6258156-0c39-4f7b-a367-954f1eb68718-db-sync-config-data\") pod \"barbican-db-sync-6g6l7\" (UID: \"c6258156-0c39-4f7b-a367-954f1eb68718\") " pod="openstack/barbican-db-sync-6g6l7" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.222154 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbcqm\" (UniqueName: \"kubernetes.io/projected/c6258156-0c39-4f7b-a367-954f1eb68718-kube-api-access-cbcqm\") pod \"barbican-db-sync-6g6l7\" (UID: \"c6258156-0c39-4f7b-a367-954f1eb68718\") " pod="openstack/barbican-db-sync-6g6l7" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298328 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62pkv\" (UniqueName: \"kubernetes.io/projected/61016d34-59f1-41cf-b7e7-0163f4507336-kube-api-access-62pkv\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298403 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-config-data\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298429 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298453 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298558 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbvjs\" (UniqueName: \"kubernetes.io/projected/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-kube-api-access-xbvjs\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298591 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-scripts\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298621 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p647q\" (UniqueName: \"kubernetes.io/projected/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-kube-api-access-p647q\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298653 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-scripts\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298681 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298709 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-combined-ca-bundle\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298754 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-config-data\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298776 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-logs\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298831 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-combined-ca-bundle\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298867 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298903 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-certs\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.298946 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-config\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.300969 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-dns-svc\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " 
pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.303318 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-dns-swift-storage-0\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.303370 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-config\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.304805 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-ovsdbserver-sb\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.306588 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-ovsdbserver-nb\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.307358 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.307533 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-logs\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.310645 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-combined-ca-bundle\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.313174 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-config-data\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.318812 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62pkv\" (UniqueName: \"kubernetes.io/projected/61016d34-59f1-41cf-b7e7-0163f4507336-kube-api-access-62pkv\") pod \"dnsmasq-dns-fcfdd6f9f-st98q\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.319613 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-tx82q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.320962 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-scripts\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.332259 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-6g6l7" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.359368 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p647q\" (UniqueName: \"kubernetes.io/projected/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-kube-api-access-p647q\") pod \"placement-db-sync-h5fss\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.447940 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.448523 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-h5fss" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.450540 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-certs\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.451209 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-config-data\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.451246 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbvjs\" (UniqueName: \"kubernetes.io/projected/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-kube-api-access-xbvjs\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.451296 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-scripts\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.451366 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-combined-ca-bundle\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.454359 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-config-data\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " 
pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.455395 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/projected/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-certs\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.455454 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-combined-ca-bundle\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.460262 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-scripts\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.482232 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbvjs\" (UniqueName: \"kubernetes.io/projected/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-kube-api-access-xbvjs\") pod \"cloudkitty-db-sync-9q89f\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.498149 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.628967 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-wrl96"] Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.851022 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-8h57v"] Dec 10 11:06:45 crc kubenswrapper[4682]: I1210 11:06:45.891625 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gxgf6"] Dec 10 11:06:45 crc kubenswrapper[4682]: W1210 11:06:45.922110 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6bb54d58_fcda_417c_953a_b198f1344c5d.slice/crio-58f3159a29f192c075464596c7ac266153590f740ee2d93923a3ecf0daf10258 WatchSource:0}: Error finding container 58f3159a29f192c075464596c7ac266153590f740ee2d93923a3ecf0daf10258: Status 404 returned error can't find the container with id 58f3159a29f192c075464596c7ac266153590f740ee2d93923a3ecf0daf10258 Dec 10 11:06:46 crc kubenswrapper[4682]: I1210 11:06:46.147605 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:06:46 crc kubenswrapper[4682]: I1210 11:06:46.211975 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8h57v" event={"ID":"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb","Type":"ContainerStarted","Data":"21d9b2c38ae39e2151956d2c529711a1b843efaa9b2618c9ac16b57f3627466b"} Dec 10 11:06:46 crc kubenswrapper[4682]: I1210 11:06:46.224827 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gxgf6" event={"ID":"6bb54d58-fcda-417c-953a-b198f1344c5d","Type":"ContainerStarted","Data":"58f3159a29f192c075464596c7ac266153590f740ee2d93923a3ecf0daf10258"} Dec 10 11:06:46 crc kubenswrapper[4682]: I1210 11:06:46.226687 4682 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" event={"ID":"f06ea747-bea0-4ce1-8600-afc308d91c82","Type":"ContainerStarted","Data":"a980991e41c3e3f709918449ba276e3ec11032f3efd87d7cce1ff0121080e1f6"} Dec 10 11:06:46 crc kubenswrapper[4682]: I1210 11:06:46.226752 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" podUID="cf92e34b-f212-4792-8712-c14f391746cf" containerName="dnsmasq-dns" containerID="cri-o://30a9469d6b8d5844e3296480c47b2ab5789dc54abe002df93d55f3ab576055a3" gracePeriod=10 Dec 10 11:06:46 crc kubenswrapper[4682]: I1210 11:06:46.360118 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-6g6l7"] Dec 10 11:06:46 crc kubenswrapper[4682]: I1210 11:06:46.522746 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-db-sync-9q89f"] Dec 10 11:06:46 crc kubenswrapper[4682]: I1210 11:06:46.533081 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-st98q"] Dec 10 11:06:46 crc kubenswrapper[4682]: I1210 11:06:46.541333 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-tx82q"] Dec 10 11:06:46 crc kubenswrapper[4682]: I1210 11:06:46.714483 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-h5fss"] Dec 10 11:06:46 crc kubenswrapper[4682]: I1210 11:06:46.864512 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:06:46 crc kubenswrapper[4682]: I1210 11:06:46.895056 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:46.995400 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-ovsdbserver-sb\") pod \"cf92e34b-f212-4792-8712-c14f391746cf\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:46.995559 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-dns-svc\") pod \"cf92e34b-f212-4792-8712-c14f391746cf\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:46.995633 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-config\") pod \"cf92e34b-f212-4792-8712-c14f391746cf\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:46.995663 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-ovsdbserver-nb\") pod \"cf92e34b-f212-4792-8712-c14f391746cf\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:46.995683 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gr2jx\" (UniqueName: \"kubernetes.io/projected/cf92e34b-f212-4792-8712-c14f391746cf-kube-api-access-gr2jx\") pod \"cf92e34b-f212-4792-8712-c14f391746cf\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 
11:06:46.995742 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-dns-swift-storage-0\") pod \"cf92e34b-f212-4792-8712-c14f391746cf\" (UID: \"cf92e34b-f212-4792-8712-c14f391746cf\") " Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.015560 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf92e34b-f212-4792-8712-c14f391746cf-kube-api-access-gr2jx" (OuterVolumeSpecName: "kube-api-access-gr2jx") pod "cf92e34b-f212-4792-8712-c14f391746cf" (UID: "cf92e34b-f212-4792-8712-c14f391746cf"). InnerVolumeSpecName "kube-api-access-gr2jx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.104968 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gr2jx\" (UniqueName: \"kubernetes.io/projected/cf92e34b-f212-4792-8712-c14f391746cf-kube-api-access-gr2jx\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.170317 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cf92e34b-f212-4792-8712-c14f391746cf" (UID: "cf92e34b-f212-4792-8712-c14f391746cf"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.170912 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cf92e34b-f212-4792-8712-c14f391746cf" (UID: "cf92e34b-f212-4792-8712-c14f391746cf"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.209941 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.209983 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.257079 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-config" (OuterVolumeSpecName: "config") pod "cf92e34b-f212-4792-8712-c14f391746cf" (UID: "cf92e34b-f212-4792-8712-c14f391746cf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.257390 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cf92e34b-f212-4792-8712-c14f391746cf" (UID: "cf92e34b-f212-4792-8712-c14f391746cf"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.307596 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "cf92e34b-f212-4792-8712-c14f391746cf" (UID: "cf92e34b-f212-4792-8712-c14f391746cf"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.308157 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gxgf6" event={"ID":"6bb54d58-fcda-417c-953a-b198f1344c5d","Type":"ContainerStarted","Data":"45818218304beb7ae2a3a3ceada5cb07bd1e5c66b3ecda6a0ae2e3fe59274616"} Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.313556 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.313580 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.313591 4682 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cf92e34b-f212-4792-8712-c14f391746cf-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.322522 4682 generic.go:334] "Generic (PLEG): container finished" podID="f06ea747-bea0-4ce1-8600-afc308d91c82" containerID="5c8128186b8b7e9ff75f2846ef2abbad8c86372ba0f8a4ecdd8b06bb13111427" exitCode=0 Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.322586 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" event={"ID":"f06ea747-bea0-4ce1-8600-afc308d91c82","Type":"ContainerDied","Data":"5c8128186b8b7e9ff75f2846ef2abbad8c86372ba0f8a4ecdd8b06bb13111427"} Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.336870 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-h5fss" event={"ID":"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54","Type":"ContainerStarted","Data":"f7c838cc1cf437d79717bbc246a17a2ef34e73d3a0224dfeefe967ad7869aa81"} Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.337071 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-gxgf6" podStartSLOduration=3.337051463 podStartE2EDuration="3.337051463s" podCreationTimestamp="2025-12-10 11:06:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:06:47.335111132 +0000 UTC m=+1287.655321882" watchObservedRunningTime="2025-12-10 11:06:47.337051463 +0000 UTC m=+1287.657262213" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.340816 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-sync-9q89f" event={"ID":"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d","Type":"ContainerStarted","Data":"2a65ad3ed36e3c0a9cd005f63ae5d71b4fbba1900fc6615234bcbe9568f123c3"} Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.345115 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"97c96a0f-0978-472b-b04a-6b1f0850b97c","Type":"ContainerStarted","Data":"0a50aae77d9c2c4e19b20e145e9cf51614a96914069846326cf9ffa10c59608b"} Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.347765 4682 generic.go:334] "Generic (PLEG): container finished" podID="cf92e34b-f212-4792-8712-c14f391746cf" containerID="30a9469d6b8d5844e3296480c47b2ab5789dc54abe002df93d55f3ab576055a3" exitCode=0 Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.347835 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" event={"ID":"cf92e34b-f212-4792-8712-c14f391746cf","Type":"ContainerDied","Data":"30a9469d6b8d5844e3296480c47b2ab5789dc54abe002df93d55f3ab576055a3"} Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.347866 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" event={"ID":"cf92e34b-f212-4792-8712-c14f391746cf","Type":"ContainerDied","Data":"92c9adf67ba67c07de34f9cf93c7287ccbe98cef05030c4d6b8b2a48729744b5"} Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.347886 4682 scope.go:117] "RemoveContainer" containerID="30a9469d6b8d5844e3296480c47b2ab5789dc54abe002df93d55f3ab576055a3" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.348050 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-8shsw" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.379781 4682 generic.go:334] "Generic (PLEG): container finished" podID="4ca63023-1a06-43a7-b9e4-1235b76b8ec8" containerID="20bd3b52de812a92adcb4ad276a9c0c51f28a4add7f8faba5813bee064947674" exitCode=0 Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.379887 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bxlhz" event={"ID":"4ca63023-1a06-43a7-b9e4-1235b76b8ec8","Type":"ContainerDied","Data":"20bd3b52de812a92adcb4ad276a9c0c51f28a4add7f8faba5813bee064947674"} Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.386109 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" event={"ID":"61016d34-59f1-41cf-b7e7-0163f4507336","Type":"ContainerStarted","Data":"855bf2dd268fd4f281736a65994c9a1b937e822693a7d45754b25a2fa53afee5"} Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.386146 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" event={"ID":"61016d34-59f1-41cf-b7e7-0163f4507336","Type":"ContainerStarted","Data":"45041a8730d54154da7a98c9b2a98bdbc3360d548bce2a1a1ae5256abd6a4f6c"} Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.405600 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6g6l7" event={"ID":"c6258156-0c39-4f7b-a367-954f1eb68718","Type":"ContainerStarted","Data":"03c7691688f184222240ac0b59ca9becb93b1acfe0a9c71897e08a56ac829fab"} Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.409763 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tx82q" event={"ID":"b9208fd9-c069-4f27-868c-e248ef7970c0","Type":"ContainerStarted","Data":"1171262eb5fd830e2362004bf67fd313cacc92b677df7bec7f101e53b6967961"} Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.409804 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tx82q" event={"ID":"b9208fd9-c069-4f27-868c-e248ef7970c0","Type":"ContainerStarted","Data":"de54bd86cf8eaedd79ded1615bfc78bdfddee5ee9d1256038fa69148a430746c"} Dec 10 11:06:47 crc kubenswrapper[4682]: 
I1210 11:06:47.433001 4682 scope.go:117] "RemoveContainer" containerID="1c36072ac10408fd4e9f6061acaf9d3f1db6ce7d98ee23fe621aa293defd3c8a" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.472764 4682 scope.go:117] "RemoveContainer" containerID="30a9469d6b8d5844e3296480c47b2ab5789dc54abe002df93d55f3ab576055a3" Dec 10 11:06:47 crc kubenswrapper[4682]: E1210 11:06:47.477591 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30a9469d6b8d5844e3296480c47b2ab5789dc54abe002df93d55f3ab576055a3\": container with ID starting with 30a9469d6b8d5844e3296480c47b2ab5789dc54abe002df93d55f3ab576055a3 not found: ID does not exist" containerID="30a9469d6b8d5844e3296480c47b2ab5789dc54abe002df93d55f3ab576055a3" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.477628 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30a9469d6b8d5844e3296480c47b2ab5789dc54abe002df93d55f3ab576055a3"} err="failed to get container status \"30a9469d6b8d5844e3296480c47b2ab5789dc54abe002df93d55f3ab576055a3\": rpc error: code = NotFound desc = could not find container \"30a9469d6b8d5844e3296480c47b2ab5789dc54abe002df93d55f3ab576055a3\": container with ID starting with 30a9469d6b8d5844e3296480c47b2ab5789dc54abe002df93d55f3ab576055a3 not found: ID does not exist" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.477650 4682 scope.go:117] "RemoveContainer" containerID="1c36072ac10408fd4e9f6061acaf9d3f1db6ce7d98ee23fe621aa293defd3c8a" Dec 10 11:06:47 crc kubenswrapper[4682]: E1210 11:06:47.478196 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c36072ac10408fd4e9f6061acaf9d3f1db6ce7d98ee23fe621aa293defd3c8a\": container with ID starting with 1c36072ac10408fd4e9f6061acaf9d3f1db6ce7d98ee23fe621aa293defd3c8a not found: ID does not exist" containerID="1c36072ac10408fd4e9f6061acaf9d3f1db6ce7d98ee23fe621aa293defd3c8a" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.478218 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c36072ac10408fd4e9f6061acaf9d3f1db6ce7d98ee23fe621aa293defd3c8a"} err="failed to get container status \"1c36072ac10408fd4e9f6061acaf9d3f1db6ce7d98ee23fe621aa293defd3c8a\": rpc error: code = NotFound desc = could not find container \"1c36072ac10408fd4e9f6061acaf9d3f1db6ce7d98ee23fe621aa293defd3c8a\": container with ID starting with 1c36072ac10408fd4e9f6061acaf9d3f1db6ce7d98ee23fe621aa293defd3c8a not found: ID does not exist" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.488503 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-tx82q" podStartSLOduration=3.488483367 podStartE2EDuration="3.488483367s" podCreationTimestamp="2025-12-10 11:06:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:06:47.437863641 +0000 UTC m=+1287.758074391" watchObservedRunningTime="2025-12-10 11:06:47.488483367 +0000 UTC m=+1287.808694117" Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.504400 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-8shsw"] Dec 10 11:06:47 crc kubenswrapper[4682]: I1210 11:06:47.520412 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-8shsw"] Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 
11:06:48.034938 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.146177 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqg7x\" (UniqueName: \"kubernetes.io/projected/f06ea747-bea0-4ce1-8600-afc308d91c82-kube-api-access-nqg7x\") pod \"f06ea747-bea0-4ce1-8600-afc308d91c82\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.146399 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-ovsdbserver-nb\") pod \"f06ea747-bea0-4ce1-8600-afc308d91c82\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.146435 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-dns-swift-storage-0\") pod \"f06ea747-bea0-4ce1-8600-afc308d91c82\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.146527 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-ovsdbserver-sb\") pod \"f06ea747-bea0-4ce1-8600-afc308d91c82\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.146756 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-config\") pod \"f06ea747-bea0-4ce1-8600-afc308d91c82\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.146784 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-dns-svc\") pod \"f06ea747-bea0-4ce1-8600-afc308d91c82\" (UID: \"f06ea747-bea0-4ce1-8600-afc308d91c82\") " Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.151380 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f06ea747-bea0-4ce1-8600-afc308d91c82-kube-api-access-nqg7x" (OuterVolumeSpecName: "kube-api-access-nqg7x") pod "f06ea747-bea0-4ce1-8600-afc308d91c82" (UID: "f06ea747-bea0-4ce1-8600-afc308d91c82"). InnerVolumeSpecName "kube-api-access-nqg7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.179525 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f06ea747-bea0-4ce1-8600-afc308d91c82" (UID: "f06ea747-bea0-4ce1-8600-afc308d91c82"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.190553 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f06ea747-bea0-4ce1-8600-afc308d91c82" (UID: "f06ea747-bea0-4ce1-8600-afc308d91c82"). 
InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.200669 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f06ea747-bea0-4ce1-8600-afc308d91c82" (UID: "f06ea747-bea0-4ce1-8600-afc308d91c82"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.212947 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-config" (OuterVolumeSpecName: "config") pod "f06ea747-bea0-4ce1-8600-afc308d91c82" (UID: "f06ea747-bea0-4ce1-8600-afc308d91c82"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.226206 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f06ea747-bea0-4ce1-8600-afc308d91c82" (UID: "f06ea747-bea0-4ce1-8600-afc308d91c82"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.250050 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqg7x\" (UniqueName: \"kubernetes.io/projected/f06ea747-bea0-4ce1-8600-afc308d91c82-kube-api-access-nqg7x\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.251089 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.251140 4682 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.251157 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.251171 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.251182 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f06ea747-bea0-4ce1-8600-afc308d91c82-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.405172 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf92e34b-f212-4792-8712-c14f391746cf" path="/var/lib/kubelet/pods/cf92e34b-f212-4792-8712-c14f391746cf/volumes" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.432224 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" event={"ID":"f06ea747-bea0-4ce1-8600-afc308d91c82","Type":"ContainerDied","Data":"a980991e41c3e3f709918449ba276e3ec11032f3efd87d7cce1ff0121080e1f6"} Dec 10 
11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.432276 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f8c45789f-wrl96" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.432284 4682 scope.go:117] "RemoveContainer" containerID="5c8128186b8b7e9ff75f2846ef2abbad8c86372ba0f8a4ecdd8b06bb13111427" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.446627 4682 generic.go:334] "Generic (PLEG): container finished" podID="61016d34-59f1-41cf-b7e7-0163f4507336" containerID="855bf2dd268fd4f281736a65994c9a1b937e822693a7d45754b25a2fa53afee5" exitCode=0 Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.446721 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" event={"ID":"61016d34-59f1-41cf-b7e7-0163f4507336","Type":"ContainerDied","Data":"855bf2dd268fd4f281736a65994c9a1b937e822693a7d45754b25a2fa53afee5"} Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.446770 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" event={"ID":"61016d34-59f1-41cf-b7e7-0163f4507336","Type":"ContainerStarted","Data":"226b5b5ee216cd48c0da5656b24dcccbe006ad5adcf6b24bcb4df4d7fb8ac20d"} Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.446806 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.510943 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-wrl96"] Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.573594 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6f8c45789f-wrl96"] Dec 10 11:06:48 crc kubenswrapper[4682]: I1210 11:06:48.582851 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" podStartSLOduration=4.582510826 podStartE2EDuration="4.582510826s" podCreationTimestamp="2025-12-10 11:06:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:06:48.514930719 +0000 UTC m=+1288.835141489" watchObservedRunningTime="2025-12-10 11:06:48.582510826 +0000 UTC m=+1288.902721576" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.034867 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.183522 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-combined-ca-bundle\") pod \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.183578 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4djzx\" (UniqueName: \"kubernetes.io/projected/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-kube-api-access-4djzx\") pod \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.183676 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-config-data\") pod \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.183965 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-db-sync-config-data\") pod \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.189181 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "4ca63023-1a06-43a7-b9e4-1235b76b8ec8" (UID: "4ca63023-1a06-43a7-b9e4-1235b76b8ec8"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.196810 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-kube-api-access-4djzx" (OuterVolumeSpecName: "kube-api-access-4djzx") pod "4ca63023-1a06-43a7-b9e4-1235b76b8ec8" (UID: "4ca63023-1a06-43a7-b9e4-1235b76b8ec8"). InnerVolumeSpecName "kube-api-access-4djzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.225107 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4ca63023-1a06-43a7-b9e4-1235b76b8ec8" (UID: "4ca63023-1a06-43a7-b9e4-1235b76b8ec8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.286040 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-config-data" (OuterVolumeSpecName: "config-data") pod "4ca63023-1a06-43a7-b9e4-1235b76b8ec8" (UID: "4ca63023-1a06-43a7-b9e4-1235b76b8ec8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.286274 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-config-data\") pod \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\" (UID: \"4ca63023-1a06-43a7-b9e4-1235b76b8ec8\") " Dec 10 11:06:49 crc kubenswrapper[4682]: W1210 11:06:49.286415 4682 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/4ca63023-1a06-43a7-b9e4-1235b76b8ec8/volumes/kubernetes.io~secret/config-data Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.286428 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-config-data" (OuterVolumeSpecName: "config-data") pod "4ca63023-1a06-43a7-b9e4-1235b76b8ec8" (UID: "4ca63023-1a06-43a7-b9e4-1235b76b8ec8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.286759 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.286777 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4djzx\" (UniqueName: \"kubernetes.io/projected/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-kube-api-access-4djzx\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.286788 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.286798 4682 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4ca63023-1a06-43a7-b9e4-1235b76b8ec8-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.467396 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bxlhz" event={"ID":"4ca63023-1a06-43a7-b9e4-1235b76b8ec8","Type":"ContainerDied","Data":"f28a68f10a66cdcc9fe2ec9648300a850a0c1e5ca6b56f26359a796f29744abd"} Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.467411 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-bxlhz" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.467772 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f28a68f10a66cdcc9fe2ec9648300a850a0c1e5ca6b56f26359a796f29744abd" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.828377 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-st98q"] Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.890783 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-qcgdz"] Dec 10 11:06:49 crc kubenswrapper[4682]: E1210 11:06:49.891307 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf92e34b-f212-4792-8712-c14f391746cf" containerName="dnsmasq-dns" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.891323 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf92e34b-f212-4792-8712-c14f391746cf" containerName="dnsmasq-dns" Dec 10 11:06:49 crc kubenswrapper[4682]: E1210 11:06:49.891349 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f06ea747-bea0-4ce1-8600-afc308d91c82" containerName="init" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.891357 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="f06ea747-bea0-4ce1-8600-afc308d91c82" containerName="init" Dec 10 11:06:49 crc kubenswrapper[4682]: E1210 11:06:49.891371 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ca63023-1a06-43a7-b9e4-1235b76b8ec8" containerName="glance-db-sync" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.891379 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ca63023-1a06-43a7-b9e4-1235b76b8ec8" containerName="glance-db-sync" Dec 10 11:06:49 crc kubenswrapper[4682]: E1210 11:06:49.891400 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf92e34b-f212-4792-8712-c14f391746cf" containerName="init" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.891407 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf92e34b-f212-4792-8712-c14f391746cf" containerName="init" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.891631 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf92e34b-f212-4792-8712-c14f391746cf" containerName="dnsmasq-dns" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.891653 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ca63023-1a06-43a7-b9e4-1235b76b8ec8" containerName="glance-db-sync" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.891669 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="f06ea747-bea0-4ce1-8600-afc308d91c82" containerName="init" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.892909 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:49 crc kubenswrapper[4682]: I1210 11:06:49.916837 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-qcgdz"] Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.000946 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-config\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.001031 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.001080 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.001190 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.001242 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d686k\" (UniqueName: \"kubernetes.io/projected/c43ce515-86e7-4f7c-a184-00575c924519-kube-api-access-d686k\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.001275 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.102432 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-config\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.102522 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.102560 4682 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.102644 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.102672 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d686k\" (UniqueName: \"kubernetes.io/projected/c43ce515-86e7-4f7c-a184-00575c924519-kube-api-access-d686k\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.102692 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.103924 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.104000 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.104416 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.104583 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-config\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.104793 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.138856 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d686k\" (UniqueName: 
\"kubernetes.io/projected/c43ce515-86e7-4f7c-a184-00575c924519-kube-api-access-d686k\") pod \"dnsmasq-dns-57c957c4ff-qcgdz\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.234190 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.404222 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f06ea747-bea0-4ce1-8600-afc308d91c82" path="/var/lib/kubelet/pods/f06ea747-bea0-4ce1-8600-afc308d91c82/volumes" Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.505450 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" podUID="61016d34-59f1-41cf-b7e7-0163f4507336" containerName="dnsmasq-dns" containerID="cri-o://226b5b5ee216cd48c0da5656b24dcccbe006ad5adcf6b24bcb4df4d7fb8ac20d" gracePeriod=10 Dec 10 11:06:50 crc kubenswrapper[4682]: I1210 11:06:50.859369 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-qcgdz"] Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.005820 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.021862 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.028145 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-qm2p5" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.028512 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.029174 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.032625 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.042928 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.048558 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.052092 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.058545 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.127140 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.127190 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.127218 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-config-data\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.127238 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxrq7\" (UniqueName: \"kubernetes.io/projected/bc9a199c-df24-4d39-a38d-b3ce67fba033-kube-api-access-gxrq7\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.127258 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-scripts\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.127381 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/011cae1f-76a2-4d73-97be-8cf2d85db880-logs\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.127444 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pj6h9\" (UniqueName: \"kubernetes.io/projected/011cae1f-76a2-4d73-97be-8cf2d85db880-kube-api-access-pj6h9\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.127503 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-scripts\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.127532 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.127564 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.127588 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/011cae1f-76a2-4d73-97be-8cf2d85db880-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.127607 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-config-data\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.127663 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc9a199c-df24-4d39-a38d-b3ce67fba033-logs\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.127689 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc9a199c-df24-4d39-a38d-b3ce67fba033-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.212587 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.229178 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-ovsdbserver-sb\") pod \"61016d34-59f1-41cf-b7e7-0163f4507336\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.229244 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-config\") pod \"61016d34-59f1-41cf-b7e7-0163f4507336\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.229455 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-ovsdbserver-nb\") pod \"61016d34-59f1-41cf-b7e7-0163f4507336\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.229505 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-dns-swift-storage-0\") pod \"61016d34-59f1-41cf-b7e7-0163f4507336\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.229603 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62pkv\" (UniqueName: \"kubernetes.io/projected/61016d34-59f1-41cf-b7e7-0163f4507336-kube-api-access-62pkv\") pod \"61016d34-59f1-41cf-b7e7-0163f4507336\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.229668 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-dns-svc\") pod \"61016d34-59f1-41cf-b7e7-0163f4507336\" (UID: \"61016d34-59f1-41cf-b7e7-0163f4507336\") " Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.229964 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-config-data\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.230038 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc9a199c-df24-4d39-a38d-b3ce67fba033-logs\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.230074 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc9a199c-df24-4d39-a38d-b3ce67fba033-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.230106 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.230138 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.230166 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-config-data\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.230191 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxrq7\" (UniqueName: \"kubernetes.io/projected/bc9a199c-df24-4d39-a38d-b3ce67fba033-kube-api-access-gxrq7\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.230216 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-scripts\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.230268 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/011cae1f-76a2-4d73-97be-8cf2d85db880-logs\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.230625 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pj6h9\" (UniqueName: \"kubernetes.io/projected/011cae1f-76a2-4d73-97be-8cf2d85db880-kube-api-access-pj6h9\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.230684 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/011cae1f-76a2-4d73-97be-8cf2d85db880-logs\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.230692 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-scripts\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.230783 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.230824 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.230847 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/011cae1f-76a2-4d73-97be-8cf2d85db880-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.231099 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/011cae1f-76a2-4d73-97be-8cf2d85db880-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.231827 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc9a199c-df24-4d39-a38d-b3ce67fba033-logs\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.232597 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc9a199c-df24-4d39-a38d-b3ce67fba033-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.234271 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61016d34-59f1-41cf-b7e7-0163f4507336-kube-api-access-62pkv" (OuterVolumeSpecName: "kube-api-access-62pkv") pod "61016d34-59f1-41cf-b7e7-0163f4507336" (UID: "61016d34-59f1-41cf-b7e7-0163f4507336"). InnerVolumeSpecName "kube-api-access-62pkv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.239993 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-scripts\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.241405 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.241441 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/eee35181b01d2e9acbd6a7670c690b29128fb0f1ac4a3b3e7ea6260a2e4780e5/globalmount\"" pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.241768 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.241794 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/7b1923bf8bb403c24020ed876074f9fa5ba6aaf35e09637f2443da6ac1e5868a/globalmount\"" pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.242329 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-config-data\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.244856 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-scripts\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.245600 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.246012 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-config-data\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.252655 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pj6h9\" (UniqueName: \"kubernetes.io/projected/011cae1f-76a2-4d73-97be-8cf2d85db880-kube-api-access-pj6h9\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.260511 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxrq7\" (UniqueName: 
\"kubernetes.io/projected/bc9a199c-df24-4d39-a38d-b3ce67fba033-kube-api-access-gxrq7\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.274000 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.325747 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "61016d34-59f1-41cf-b7e7-0163f4507336" (UID: "61016d34-59f1-41cf-b7e7-0163f4507336"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.329930 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "61016d34-59f1-41cf-b7e7-0163f4507336" (UID: "61016d34-59f1-41cf-b7e7-0163f4507336"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.333129 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.333155 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.333166 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62pkv\" (UniqueName: \"kubernetes.io/projected/61016d34-59f1-41cf-b7e7-0163f4507336-kube-api-access-62pkv\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.335209 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") pod \"glance-default-external-api-0\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.340615 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") pod \"glance-default-internal-api-0\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.388081 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-config" (OuterVolumeSpecName: "config") pod "61016d34-59f1-41cf-b7e7-0163f4507336" (UID: "61016d34-59f1-41cf-b7e7-0163f4507336"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.409203 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "61016d34-59f1-41cf-b7e7-0163f4507336" (UID: "61016d34-59f1-41cf-b7e7-0163f4507336"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.411315 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "61016d34-59f1-41cf-b7e7-0163f4507336" (UID: "61016d34-59f1-41cf-b7e7-0163f4507336"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.434982 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.435012 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.435021 4682 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/61016d34-59f1-41cf-b7e7-0163f4507336-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.499417 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.513165 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.525877 4682 generic.go:334] "Generic (PLEG): container finished" podID="6bb54d58-fcda-417c-953a-b198f1344c5d" containerID="45818218304beb7ae2a3a3ceada5cb07bd1e5c66b3ecda6a0ae2e3fe59274616" exitCode=0 Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.525935 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gxgf6" event={"ID":"6bb54d58-fcda-417c-953a-b198f1344c5d","Type":"ContainerDied","Data":"45818218304beb7ae2a3a3ceada5cb07bd1e5c66b3ecda6a0ae2e3fe59274616"} Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.527626 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" event={"ID":"c43ce515-86e7-4f7c-a184-00575c924519","Type":"ContainerStarted","Data":"59517bc02dc5db19012f25c189f8d70340c5c6cc6ad4b8bee03c57cb6198c7a6"} Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.530237 4682 generic.go:334] "Generic (PLEG): container finished" podID="61016d34-59f1-41cf-b7e7-0163f4507336" containerID="226b5b5ee216cd48c0da5656b24dcccbe006ad5adcf6b24bcb4df4d7fb8ac20d" exitCode=0 Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.530259 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" event={"ID":"61016d34-59f1-41cf-b7e7-0163f4507336","Type":"ContainerDied","Data":"226b5b5ee216cd48c0da5656b24dcccbe006ad5adcf6b24bcb4df4d7fb8ac20d"} Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.530275 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" event={"ID":"61016d34-59f1-41cf-b7e7-0163f4507336","Type":"ContainerDied","Data":"45041a8730d54154da7a98c9b2a98bdbc3360d548bce2a1a1ae5256abd6a4f6c"} Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.530293 4682 scope.go:117] "RemoveContainer" containerID="226b5b5ee216cd48c0da5656b24dcccbe006ad5adcf6b24bcb4df4d7fb8ac20d" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.530391 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcfdd6f9f-st98q" Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.675444 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-st98q"] Dec 10 11:06:51 crc kubenswrapper[4682]: I1210 11:06:51.696359 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fcfdd6f9f-st98q"] Dec 10 11:06:52 crc kubenswrapper[4682]: I1210 11:06:52.397301 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61016d34-59f1-41cf-b7e7-0163f4507336" path="/var/lib/kubelet/pods/61016d34-59f1-41cf-b7e7-0163f4507336/volumes" Dec 10 11:06:55 crc kubenswrapper[4682]: I1210 11:06:55.026773 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:06:55 crc kubenswrapper[4682]: I1210 11:06:55.109311 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.109729 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.222481 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-credential-keys\") pod \"6bb54d58-fcda-417c-953a-b198f1344c5d\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.222557 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6v2lq\" (UniqueName: \"kubernetes.io/projected/6bb54d58-fcda-417c-953a-b198f1344c5d-kube-api-access-6v2lq\") pod \"6bb54d58-fcda-417c-953a-b198f1344c5d\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.222588 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-combined-ca-bundle\") pod \"6bb54d58-fcda-417c-953a-b198f1344c5d\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.222679 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-config-data\") pod \"6bb54d58-fcda-417c-953a-b198f1344c5d\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.222773 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-scripts\") pod \"6bb54d58-fcda-417c-953a-b198f1344c5d\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.222796 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-fernet-keys\") pod \"6bb54d58-fcda-417c-953a-b198f1344c5d\" (UID: \"6bb54d58-fcda-417c-953a-b198f1344c5d\") " Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.229107 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "6bb54d58-fcda-417c-953a-b198f1344c5d" (UID: "6bb54d58-fcda-417c-953a-b198f1344c5d"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.229676 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-scripts" (OuterVolumeSpecName: "scripts") pod "6bb54d58-fcda-417c-953a-b198f1344c5d" (UID: "6bb54d58-fcda-417c-953a-b198f1344c5d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.241578 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6bb54d58-fcda-417c-953a-b198f1344c5d" (UID: "6bb54d58-fcda-417c-953a-b198f1344c5d"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.241913 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bb54d58-fcda-417c-953a-b198f1344c5d-kube-api-access-6v2lq" (OuterVolumeSpecName: "kube-api-access-6v2lq") pod "6bb54d58-fcda-417c-953a-b198f1344c5d" (UID: "6bb54d58-fcda-417c-953a-b198f1344c5d"). InnerVolumeSpecName "kube-api-access-6v2lq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.250885 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-config-data" (OuterVolumeSpecName: "config-data") pod "6bb54d58-fcda-417c-953a-b198f1344c5d" (UID: "6bb54d58-fcda-417c-953a-b198f1344c5d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.259384 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6bb54d58-fcda-417c-953a-b198f1344c5d" (UID: "6bb54d58-fcda-417c-953a-b198f1344c5d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.325406 4682 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-credential-keys\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.325435 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6v2lq\" (UniqueName: \"kubernetes.io/projected/6bb54d58-fcda-417c-953a-b198f1344c5d-kube-api-access-6v2lq\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.325446 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.325455 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.325463 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.325484 4682 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6bb54d58-fcda-417c-953a-b198f1344c5d-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.561818 4682 scope.go:117] "RemoveContainer" containerID="855bf2dd268fd4f281736a65994c9a1b937e822693a7d45754b25a2fa53afee5" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.623186 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gxgf6" event={"ID":"6bb54d58-fcda-417c-953a-b198f1344c5d","Type":"ContainerDied","Data":"58f3159a29f192c075464596c7ac266153590f740ee2d93923a3ecf0daf10258"} Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.623557 4682 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="58f3159a29f192c075464596c7ac266153590f740ee2d93923a3ecf0daf10258" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.623207 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gxgf6" Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.633893 4682 generic.go:334] "Generic (PLEG): container finished" podID="c43ce515-86e7-4f7c-a184-00575c924519" containerID="6ecf55b44a7e6712a1fd7ed57c4328e1a18e199f6d2830e2e81128cbd8072cd5" exitCode=0 Dec 10 11:06:59 crc kubenswrapper[4682]: I1210 11:06:59.633930 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" event={"ID":"c43ce515-86e7-4f7c-a184-00575c924519","Type":"ContainerDied","Data":"6ecf55b44a7e6712a1fd7ed57c4328e1a18e199f6d2830e2e81128cbd8072cd5"} Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.208578 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-gxgf6"] Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.217202 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-gxgf6"] Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.311989 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-ppzrh"] Dec 10 11:07:00 crc kubenswrapper[4682]: E1210 11:07:00.312823 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61016d34-59f1-41cf-b7e7-0163f4507336" containerName="dnsmasq-dns" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.312846 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="61016d34-59f1-41cf-b7e7-0163f4507336" containerName="dnsmasq-dns" Dec 10 11:07:00 crc kubenswrapper[4682]: E1210 11:07:00.312886 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bb54d58-fcda-417c-953a-b198f1344c5d" containerName="keystone-bootstrap" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.312896 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bb54d58-fcda-417c-953a-b198f1344c5d" containerName="keystone-bootstrap" Dec 10 11:07:00 crc kubenswrapper[4682]: E1210 11:07:00.312912 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61016d34-59f1-41cf-b7e7-0163f4507336" containerName="init" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.312921 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="61016d34-59f1-41cf-b7e7-0163f4507336" containerName="init" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.313591 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="61016d34-59f1-41cf-b7e7-0163f4507336" containerName="dnsmasq-dns" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.313618 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bb54d58-fcda-417c-953a-b198f1344c5d" containerName="keystone-bootstrap" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.314637 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.318763 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.319126 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-44cvq" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.319346 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.319713 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.336690 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-ppzrh"] Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.397338 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bb54d58-fcda-417c-953a-b198f1344c5d" path="/var/lib/kubelet/pods/6bb54d58-fcda-417c-953a-b198f1344c5d/volumes" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.445058 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-fernet-keys\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.445110 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-config-data\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.445140 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-credential-keys\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.445165 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9dvj\" (UniqueName: \"kubernetes.io/projected/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-kube-api-access-n9dvj\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.445342 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-scripts\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.445552 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-combined-ca-bundle\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc 
kubenswrapper[4682]: I1210 11:07:00.547733 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-scripts\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.547926 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-combined-ca-bundle\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.548043 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-fernet-keys\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.548096 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-config-data\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.548142 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-credential-keys\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.548190 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9dvj\" (UniqueName: \"kubernetes.io/projected/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-kube-api-access-n9dvj\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.553956 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-scripts\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.554745 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-config-data\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.564270 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-credential-keys\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.564672 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-combined-ca-bundle\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.568014 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-fernet-keys\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.570372 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9dvj\" (UniqueName: \"kubernetes.io/projected/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-kube-api-access-n9dvj\") pod \"keystone-bootstrap-ppzrh\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:00 crc kubenswrapper[4682]: I1210 11:07:00.648068 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:07 crc kubenswrapper[4682]: I1210 11:07:07.750729 4682 generic.go:334] "Generic (PLEG): container finished" podID="b9208fd9-c069-4f27-868c-e248ef7970c0" containerID="1171262eb5fd830e2362004bf67fd313cacc92b677df7bec7f101e53b6967961" exitCode=0 Dec 10 11:07:07 crc kubenswrapper[4682]: I1210 11:07:07.751275 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tx82q" event={"ID":"b9208fd9-c069-4f27-868c-e248ef7970c0","Type":"ContainerDied","Data":"1171262eb5fd830e2362004bf67fd313cacc92b677df7bec7f101e53b6967961"} Dec 10 11:07:12 crc kubenswrapper[4682]: E1210 11:07:12.428228 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Dec 10 11:07:12 crc kubenswrapper[4682]: E1210 11:07:12.429091 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p647q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-h5fss_openstack(8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:07:12 crc kubenswrapper[4682]: E1210 11:07:12.430307 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-h5fss" podUID="8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54" Dec 10 11:07:12 crc kubenswrapper[4682]: I1210 11:07:12.570651 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-tx82q" Dec 10 11:07:12 crc kubenswrapper[4682]: I1210 11:07:12.632604 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfssm\" (UniqueName: \"kubernetes.io/projected/b9208fd9-c069-4f27-868c-e248ef7970c0-kube-api-access-zfssm\") pod \"b9208fd9-c069-4f27-868c-e248ef7970c0\" (UID: \"b9208fd9-c069-4f27-868c-e248ef7970c0\") " Dec 10 11:07:12 crc kubenswrapper[4682]: I1210 11:07:12.632988 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9208fd9-c069-4f27-868c-e248ef7970c0-combined-ca-bundle\") pod \"b9208fd9-c069-4f27-868c-e248ef7970c0\" (UID: \"b9208fd9-c069-4f27-868c-e248ef7970c0\") " Dec 10 11:07:12 crc kubenswrapper[4682]: I1210 11:07:12.633024 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b9208fd9-c069-4f27-868c-e248ef7970c0-config\") pod \"b9208fd9-c069-4f27-868c-e248ef7970c0\" (UID: \"b9208fd9-c069-4f27-868c-e248ef7970c0\") " Dec 10 11:07:12 crc kubenswrapper[4682]: I1210 11:07:12.655238 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9208fd9-c069-4f27-868c-e248ef7970c0-kube-api-access-zfssm" (OuterVolumeSpecName: "kube-api-access-zfssm") pod "b9208fd9-c069-4f27-868c-e248ef7970c0" (UID: "b9208fd9-c069-4f27-868c-e248ef7970c0"). InnerVolumeSpecName "kube-api-access-zfssm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:12 crc kubenswrapper[4682]: I1210 11:07:12.682029 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9208fd9-c069-4f27-868c-e248ef7970c0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b9208fd9-c069-4f27-868c-e248ef7970c0" (UID: "b9208fd9-c069-4f27-868c-e248ef7970c0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:12 crc kubenswrapper[4682]: I1210 11:07:12.684992 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9208fd9-c069-4f27-868c-e248ef7970c0-config" (OuterVolumeSpecName: "config") pod "b9208fd9-c069-4f27-868c-e248ef7970c0" (UID: "b9208fd9-c069-4f27-868c-e248ef7970c0"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:12 crc kubenswrapper[4682]: I1210 11:07:12.734927 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfssm\" (UniqueName: \"kubernetes.io/projected/b9208fd9-c069-4f27-868c-e248ef7970c0-kube-api-access-zfssm\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:12 crc kubenswrapper[4682]: I1210 11:07:12.734967 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9208fd9-c069-4f27-868c-e248ef7970c0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:12 crc kubenswrapper[4682]: I1210 11:07:12.734985 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/b9208fd9-c069-4f27-868c-e248ef7970c0-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:12 crc kubenswrapper[4682]: I1210 11:07:12.809179 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tx82q" event={"ID":"b9208fd9-c069-4f27-868c-e248ef7970c0","Type":"ContainerDied","Data":"de54bd86cf8eaedd79ded1615bfc78bdfddee5ee9d1256038fa69148a430746c"} Dec 10 11:07:12 crc kubenswrapper[4682]: I1210 11:07:12.809217 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-tx82q" Dec 10 11:07:12 crc kubenswrapper[4682]: I1210 11:07:12.809230 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de54bd86cf8eaedd79ded1615bfc78bdfddee5ee9d1256038fa69148a430746c" Dec 10 11:07:12 crc kubenswrapper[4682]: E1210 11:07:12.811234 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-h5fss" podUID="8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.753749 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-qcgdz"] Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.830390 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-thrpw"] Dec 10 11:07:13 crc kubenswrapper[4682]: E1210 11:07:13.830875 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9208fd9-c069-4f27-868c-e248ef7970c0" containerName="neutron-db-sync" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.830895 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9208fd9-c069-4f27-868c-e248ef7970c0" containerName="neutron-db-sync" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.831060 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9208fd9-c069-4f27-868c-e248ef7970c0" containerName="neutron-db-sync" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.833144 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.844253 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-thrpw"] Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.862727 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.862930 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.862995 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.863077 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-config\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.863106 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ms9kj\" (UniqueName: \"kubernetes.io/projected/9483a109-197b-41a8-94ee-498bee3a67eb-kube-api-access-ms9kj\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.863142 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.934413 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-74cb8f8cb4-8fg27"] Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.940224 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.944009 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-rx4sr" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.944324 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.944455 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.944602 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.956436 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-74cb8f8cb4-8fg27"] Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.965190 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-httpd-config\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.965259 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-config\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.965309 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.965346 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.965403 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-config\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.965421 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ms9kj\" (UniqueName: \"kubernetes.io/projected/9483a109-197b-41a8-94ee-498bee3a67eb-kube-api-access-ms9kj\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.965456 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-combined-ca-bundle\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: 
\"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.965496 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.965540 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-ovndb-tls-certs\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.965567 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.965591 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2k7l\" (UniqueName: \"kubernetes.io/projected/cae3bbd4-4d3b-4cce-969d-6a742664664e-kube-api-access-r2k7l\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.970378 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-config\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.970744 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.970935 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.971067 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:13 crc kubenswrapper[4682]: I1210 11:07:13.971506 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " 
pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:14 crc kubenswrapper[4682]: I1210 11:07:14.011550 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ms9kj\" (UniqueName: \"kubernetes.io/projected/9483a109-197b-41a8-94ee-498bee3a67eb-kube-api-access-ms9kj\") pod \"dnsmasq-dns-5ccc5c4795-thrpw\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:14 crc kubenswrapper[4682]: I1210 11:07:14.068543 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-config\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:14 crc kubenswrapper[4682]: I1210 11:07:14.068660 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-combined-ca-bundle\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:14 crc kubenswrapper[4682]: I1210 11:07:14.068706 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-ovndb-tls-certs\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:14 crc kubenswrapper[4682]: I1210 11:07:14.068736 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2k7l\" (UniqueName: \"kubernetes.io/projected/cae3bbd4-4d3b-4cce-969d-6a742664664e-kube-api-access-r2k7l\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:14 crc kubenswrapper[4682]: I1210 11:07:14.068780 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-httpd-config\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:14 crc kubenswrapper[4682]: I1210 11:07:14.072106 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-httpd-config\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:14 crc kubenswrapper[4682]: I1210 11:07:14.081358 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-combined-ca-bundle\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:14 crc kubenswrapper[4682]: I1210 11:07:14.083406 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-ovndb-tls-certs\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:14 crc kubenswrapper[4682]: I1210 11:07:14.083530 4682 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-config\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:14 crc kubenswrapper[4682]: I1210 11:07:14.119313 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2k7l\" (UniqueName: \"kubernetes.io/projected/cae3bbd4-4d3b-4cce-969d-6a742664664e-kube-api-access-r2k7l\") pod \"neutron-74cb8f8cb4-8fg27\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:14 crc kubenswrapper[4682]: I1210 11:07:14.167925 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:14 crc kubenswrapper[4682]: I1210 11:07:14.271141 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:14 crc kubenswrapper[4682]: E1210 11:07:14.496154 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Dec 10 11:07:14 crc kubenswrapper[4682]: E1210 11:07:14.496627 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f9gcl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil
,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-8h57v_openstack(382d9ec8-5a3b-47b3-a301-955c7e2a4ecb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:07:14 crc kubenswrapper[4682]: E1210 11:07:14.497816 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-8h57v" podUID="382d9ec8-5a3b-47b3-a301-955c7e2a4ecb" Dec 10 11:07:14 crc kubenswrapper[4682]: E1210 11:07:14.834028 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-8h57v" podUID="382d9ec8-5a3b-47b3-a301-955c7e2a4ecb" Dec 10 11:07:15 crc kubenswrapper[4682]: I1210 11:07:15.958988 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7f7fc58469-rvhd4"] Dec 10 11:07:15 crc kubenswrapper[4682]: I1210 11:07:15.961588 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:15 crc kubenswrapper[4682]: I1210 11:07:15.963460 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Dec 10 11:07:15 crc kubenswrapper[4682]: I1210 11:07:15.979577 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7f7fc58469-rvhd4"] Dec 10 11:07:15 crc kubenswrapper[4682]: I1210 11:07:15.987020 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.007995 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-public-tls-certs\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.008093 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvt94\" (UniqueName: \"kubernetes.io/projected/ec3a169e-4679-409e-a778-f88b4972abf8-kube-api-access-tvt94\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.008119 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-httpd-config\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.008143 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-combined-ca-bundle\") pod \"neutron-7f7fc58469-rvhd4\" (UID: 
\"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.008193 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-config\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.008245 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-internal-tls-certs\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.008260 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-ovndb-tls-certs\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.110336 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-httpd-config\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.110405 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-combined-ca-bundle\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.110510 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-config\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.110616 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-ovndb-tls-certs\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.110639 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-internal-tls-certs\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.110683 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-public-tls-certs\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 
11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.110807 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvt94\" (UniqueName: \"kubernetes.io/projected/ec3a169e-4679-409e-a778-f88b4972abf8-kube-api-access-tvt94\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.117331 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-config\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.118929 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-internal-tls-certs\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.119170 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-public-tls-certs\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.119637 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-httpd-config\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.119960 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-combined-ca-bundle\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.120751 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec3a169e-4679-409e-a778-f88b4972abf8-ovndb-tls-certs\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.132882 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvt94\" (UniqueName: \"kubernetes.io/projected/ec3a169e-4679-409e-a778-f88b4972abf8-kube-api-access-tvt94\") pod \"neutron-7f7fc58469-rvhd4\" (UID: \"ec3a169e-4679-409e-a778-f88b4972abf8\") " pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:16 crc kubenswrapper[4682]: I1210 11:07:16.284369 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:18 crc kubenswrapper[4682]: I1210 11:07:18.856347 4682 scope.go:117] "RemoveContainer" containerID="226b5b5ee216cd48c0da5656b24dcccbe006ad5adcf6b24bcb4df4d7fb8ac20d" Dec 10 11:07:18 crc kubenswrapper[4682]: E1210 11:07:18.859894 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"226b5b5ee216cd48c0da5656b24dcccbe006ad5adcf6b24bcb4df4d7fb8ac20d\": container with ID starting with 226b5b5ee216cd48c0da5656b24dcccbe006ad5adcf6b24bcb4df4d7fb8ac20d not found: ID does not exist" containerID="226b5b5ee216cd48c0da5656b24dcccbe006ad5adcf6b24bcb4df4d7fb8ac20d" Dec 10 11:07:18 crc kubenswrapper[4682]: I1210 11:07:18.859936 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"226b5b5ee216cd48c0da5656b24dcccbe006ad5adcf6b24bcb4df4d7fb8ac20d"} err="failed to get container status \"226b5b5ee216cd48c0da5656b24dcccbe006ad5adcf6b24bcb4df4d7fb8ac20d\": rpc error: code = NotFound desc = could not find container \"226b5b5ee216cd48c0da5656b24dcccbe006ad5adcf6b24bcb4df4d7fb8ac20d\": container with ID starting with 226b5b5ee216cd48c0da5656b24dcccbe006ad5adcf6b24bcb4df4d7fb8ac20d not found: ID does not exist" Dec 10 11:07:18 crc kubenswrapper[4682]: I1210 11:07:18.859961 4682 scope.go:117] "RemoveContainer" containerID="855bf2dd268fd4f281736a65994c9a1b937e822693a7d45754b25a2fa53afee5" Dec 10 11:07:18 crc kubenswrapper[4682]: E1210 11:07:18.860306 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"855bf2dd268fd4f281736a65994c9a1b937e822693a7d45754b25a2fa53afee5\": container with ID starting with 855bf2dd268fd4f281736a65994c9a1b937e822693a7d45754b25a2fa53afee5 not found: ID does not exist" containerID="855bf2dd268fd4f281736a65994c9a1b937e822693a7d45754b25a2fa53afee5" Dec 10 11:07:18 crc kubenswrapper[4682]: I1210 11:07:18.860368 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"855bf2dd268fd4f281736a65994c9a1b937e822693a7d45754b25a2fa53afee5"} err="failed to get container status \"855bf2dd268fd4f281736a65994c9a1b937e822693a7d45754b25a2fa53afee5\": rpc error: code = NotFound desc = could not find container \"855bf2dd268fd4f281736a65994c9a1b937e822693a7d45754b25a2fa53afee5\": container with ID starting with 855bf2dd268fd4f281736a65994c9a1b937e822693a7d45754b25a2fa53afee5 not found: ID does not exist" Dec 10 11:07:21 crc kubenswrapper[4682]: I1210 11:07:21.979099 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:07:22 crc kubenswrapper[4682]: I1210 11:07:22.947172 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:07:24 crc kubenswrapper[4682]: W1210 11:07:24.067073 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod011cae1f_76a2_4d73_97be_8cf2d85db880.slice/crio-6f152783e58b9bfa0334c22ec78446c7bb0801920dee7428ef3bbaed7deb8614 WatchSource:0}: Error finding container 6f152783e58b9bfa0334c22ec78446c7bb0801920dee7428ef3bbaed7deb8614: Status 404 returned error can't find the container with id 6f152783e58b9bfa0334c22ec78446c7bb0801920dee7428ef3bbaed7deb8614 Dec 10 11:07:24 crc kubenswrapper[4682]: I1210 11:07:24.506894 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/keystone-bootstrap-ppzrh"] Dec 10 11:07:24 crc kubenswrapper[4682]: I1210 11:07:24.694523 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-74cb8f8cb4-8fg27"] Dec 10 11:07:24 crc kubenswrapper[4682]: I1210 11:07:24.721715 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-thrpw"] Dec 10 11:07:24 crc kubenswrapper[4682]: W1210 11:07:24.828465 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c6b21b2_6d5f_4cc0_a3af_f3cbb98067b2.slice/crio-4d17f701062cd814bd7cb3de82d7a6b635d08a10088f547d8fc65fdd15031955 WatchSource:0}: Error finding container 4d17f701062cd814bd7cb3de82d7a6b635d08a10088f547d8fc65fdd15031955: Status 404 returned error can't find the container with id 4d17f701062cd814bd7cb3de82d7a6b635d08a10088f547d8fc65fdd15031955 Dec 10 11:07:24 crc kubenswrapper[4682]: E1210 11:07:24.853215 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current" Dec 10 11:07:24 crc kubenswrapper[4682]: E1210 11:07:24.853284 4682 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current" Dec 10 11:07:24 crc kubenswrapper[4682]: E1210 11:07:24.853420 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xbvjs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunA
sUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-9q89f_openstack(997c9b87-b796-40a3-a9c9-cf1e2a3abc4d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 11:07:24 crc kubenswrapper[4682]: E1210 11:07:24.855088 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cloudkitty-db-sync-9q89f" podUID="997c9b87-b796-40a3-a9c9-cf1e2a3abc4d" Dec 10 11:07:24 crc kubenswrapper[4682]: I1210 11:07:24.966266 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" event={"ID":"c43ce515-86e7-4f7c-a184-00575c924519","Type":"ContainerStarted","Data":"d816305852db1834165c36eb872791f4efd79c288a74179c3ef6f7cbe21df511"} Dec 10 11:07:24 crc kubenswrapper[4682]: I1210 11:07:24.966773 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:07:24 crc kubenswrapper[4682]: I1210 11:07:24.966336 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" podUID="c43ce515-86e7-4f7c-a184-00575c924519" containerName="dnsmasq-dns" containerID="cri-o://d816305852db1834165c36eb872791f4efd79c288a74179c3ef6f7cbe21df511" gracePeriod=10 Dec 10 11:07:24 crc kubenswrapper[4682]: I1210 11:07:24.968534 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" event={"ID":"9483a109-197b-41a8-94ee-498bee3a67eb","Type":"ContainerStarted","Data":"2faaa8fa3f6d8fba11b1292294db2a2b2d82dba1797cea49278aecd0464daca9"} Dec 10 11:07:24 crc kubenswrapper[4682]: I1210 11:07:24.970287 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-ppzrh" event={"ID":"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2","Type":"ContainerStarted","Data":"4d17f701062cd814bd7cb3de82d7a6b635d08a10088f547d8fc65fdd15031955"} Dec 10 11:07:24 crc kubenswrapper[4682]: I1210 11:07:24.971704 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"011cae1f-76a2-4d73-97be-8cf2d85db880","Type":"ContainerStarted","Data":"6f152783e58b9bfa0334c22ec78446c7bb0801920dee7428ef3bbaed7deb8614"} Dec 10 11:07:24 crc kubenswrapper[4682]: I1210 11:07:24.973107 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74cb8f8cb4-8fg27" event={"ID":"cae3bbd4-4d3b-4cce-969d-6a742664664e","Type":"ContainerStarted","Data":"3d93b711e61c35b34f104aea7e501e0de2d2abc514c7f76f57cbafefc317111b"} Dec 10 11:07:24 crc kubenswrapper[4682]: I1210 11:07:24.974407 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"bc9a199c-df24-4d39-a38d-b3ce67fba033","Type":"ContainerStarted","Data":"13c1344d9073e387a39645eb6cd530b5ae9bc7607d27395af20f540243692149"} Dec 10 11:07:25 crc kubenswrapper[4682]: E1210 11:07:24.998287 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current\\\"\"" pod="openstack/cloudkitty-db-sync-9q89f" podUID="997c9b87-b796-40a3-a9c9-cf1e2a3abc4d" Dec 10 11:07:25 crc kubenswrapper[4682]: I1210 11:07:25.024232 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" podStartSLOduration=36.024206392 podStartE2EDuration="36.024206392s" podCreationTimestamp="2025-12-10 11:06:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:24.994294884 +0000 UTC m=+1325.314505644" watchObservedRunningTime="2025-12-10 11:07:25.024206392 +0000 UTC m=+1325.344417142" Dec 10 11:07:25 crc kubenswrapper[4682]: I1210 11:07:25.396753 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7f7fc58469-rvhd4"] Dec 10 11:07:25 crc kubenswrapper[4682]: I1210 11:07:25.669314 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:07:25 crc kubenswrapper[4682]: I1210 11:07:25.787593 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-ovsdbserver-sb\") pod \"c43ce515-86e7-4f7c-a184-00575c924519\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " Dec 10 11:07:25 crc kubenswrapper[4682]: I1210 11:07:25.787927 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-dns-svc\") pod \"c43ce515-86e7-4f7c-a184-00575c924519\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " Dec 10 11:07:25 crc kubenswrapper[4682]: I1210 11:07:25.788120 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-dns-swift-storage-0\") pod \"c43ce515-86e7-4f7c-a184-00575c924519\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " Dec 10 11:07:25 crc kubenswrapper[4682]: I1210 11:07:25.788220 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d686k\" (UniqueName: \"kubernetes.io/projected/c43ce515-86e7-4f7c-a184-00575c924519-kube-api-access-d686k\") pod \"c43ce515-86e7-4f7c-a184-00575c924519\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " Dec 10 11:07:25 crc kubenswrapper[4682]: I1210 11:07:25.788384 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-ovsdbserver-nb\") pod \"c43ce515-86e7-4f7c-a184-00575c924519\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " Dec 10 11:07:25 crc kubenswrapper[4682]: I1210 11:07:25.788496 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-config\") pod \"c43ce515-86e7-4f7c-a184-00575c924519\" (UID: \"c43ce515-86e7-4f7c-a184-00575c924519\") " Dec 10 11:07:25 crc kubenswrapper[4682]: I1210 11:07:25.802762 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c43ce515-86e7-4f7c-a184-00575c924519-kube-api-access-d686k" (OuterVolumeSpecName: "kube-api-access-d686k") pod 
"c43ce515-86e7-4f7c-a184-00575c924519" (UID: "c43ce515-86e7-4f7c-a184-00575c924519"). InnerVolumeSpecName "kube-api-access-d686k". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:25 crc kubenswrapper[4682]: I1210 11:07:25.891057 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d686k\" (UniqueName: \"kubernetes.io/projected/c43ce515-86e7-4f7c-a184-00575c924519-kube-api-access-d686k\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.004702 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f7fc58469-rvhd4" event={"ID":"ec3a169e-4679-409e-a778-f88b4972abf8","Type":"ContainerStarted","Data":"c2dc11a7bdb118e1e0dfe51e67198f6a3d6c95f81202f191bfb974565e60571d"} Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.011688 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c43ce515-86e7-4f7c-a184-00575c924519" (UID: "c43ce515-86e7-4f7c-a184-00575c924519"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.019009 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c43ce515-86e7-4f7c-a184-00575c924519" (UID: "c43ce515-86e7-4f7c-a184-00575c924519"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.025003 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c43ce515-86e7-4f7c-a184-00575c924519" (UID: "c43ce515-86e7-4f7c-a184-00575c924519"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.025182 4682 generic.go:334] "Generic (PLEG): container finished" podID="c43ce515-86e7-4f7c-a184-00575c924519" containerID="d816305852db1834165c36eb872791f4efd79c288a74179c3ef6f7cbe21df511" exitCode=0 Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.025848 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" event={"ID":"c43ce515-86e7-4f7c-a184-00575c924519","Type":"ContainerDied","Data":"d816305852db1834165c36eb872791f4efd79c288a74179c3ef6f7cbe21df511"} Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.025886 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" event={"ID":"c43ce515-86e7-4f7c-a184-00575c924519","Type":"ContainerDied","Data":"59517bc02dc5db19012f25c189f8d70340c5c6cc6ad4b8bee03c57cb6198c7a6"} Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.025902 4682 scope.go:117] "RemoveContainer" containerID="d816305852db1834165c36eb872791f4efd79c288a74179c3ef6f7cbe21df511" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.025952 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-qcgdz" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.034232 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-config" (OuterVolumeSpecName: "config") pod "c43ce515-86e7-4f7c-a184-00575c924519" (UID: "c43ce515-86e7-4f7c-a184-00575c924519"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.037145 4682 generic.go:334] "Generic (PLEG): container finished" podID="9483a109-197b-41a8-94ee-498bee3a67eb" containerID="f5cb586da4d0b375318f417e0458181ec5a3b4df6956e469b1a119d8f097ec9c" exitCode=0 Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.037271 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" event={"ID":"9483a109-197b-41a8-94ee-498bee3a67eb","Type":"ContainerDied","Data":"f5cb586da4d0b375318f417e0458181ec5a3b4df6956e469b1a119d8f097ec9c"} Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.041287 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-ppzrh" event={"ID":"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2","Type":"ContainerStarted","Data":"d44450d7db3b3f08590d80b491c4dc6dc1ef11723c687adada13904ae092f553"} Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.041866 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c43ce515-86e7-4f7c-a184-00575c924519" (UID: "c43ce515-86e7-4f7c-a184-00575c924519"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.044763 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74cb8f8cb4-8fg27" event={"ID":"cae3bbd4-4d3b-4cce-969d-6a742664664e","Type":"ContainerStarted","Data":"43a5b4fc146dbd44eb112f3dde542dbafb8cebe63e489137121408672242f4a2"} Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.044818 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74cb8f8cb4-8fg27" event={"ID":"cae3bbd4-4d3b-4cce-969d-6a742664664e","Type":"ContainerStarted","Data":"faa278a4b836a3b718a93da59d3922dc7ee5f150d84552d6723475d5a46656b6"} Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.045522 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.047657 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6g6l7" event={"ID":"c6258156-0c39-4f7b-a367-954f1eb68718","Type":"ContainerStarted","Data":"b0541aa7f9c23fb6eef1ef55937b6fa7738e2170c6eec45e4c19bbd20ca954da"} Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.052368 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"97c96a0f-0978-472b-b04a-6b1f0850b97c","Type":"ContainerStarted","Data":"f8aab96b0f7d4ac6c1f8c39574398e911e23efeeaffa56928b0213e6d14e0cea"} Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.086169 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-74cb8f8cb4-8fg27" podStartSLOduration=13.086152925 podStartE2EDuration="13.086152925s" podCreationTimestamp="2025-12-10 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:26.080858949 +0000 UTC m=+1326.401069709" watchObservedRunningTime="2025-12-10 11:07:26.086152925 +0000 UTC m=+1326.406363675" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.099881 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.099905 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.099914 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.099922 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.099930 4682 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c43ce515-86e7-4f7c-a184-00575c924519-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.126535 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-6g6l7" podStartSLOduration=7.223357882 podStartE2EDuration="42.12651778s" podCreationTimestamp="2025-12-10 11:06:44 +0000 UTC" firstStartedPulling="2025-12-10 11:06:46.38682375 +0000 UTC m=+1286.707034500" lastFinishedPulling="2025-12-10 11:07:21.289983648 +0000 UTC m=+1321.610194398" observedRunningTime="2025-12-10 11:07:26.095481867 +0000 UTC m=+1326.415692617" watchObservedRunningTime="2025-12-10 11:07:26.12651778 +0000 UTC m=+1326.446728530" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.134018 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-ppzrh" podStartSLOduration=26.134001644 podStartE2EDuration="26.134001644s" podCreationTimestamp="2025-12-10 11:07:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:26.117124285 +0000 UTC m=+1326.437335035" watchObservedRunningTime="2025-12-10 11:07:26.134001644 +0000 UTC m=+1326.454212394" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.361444 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-qcgdz"] Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.378017 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-qcgdz"] Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.415346 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c43ce515-86e7-4f7c-a184-00575c924519" path="/var/lib/kubelet/pods/c43ce515-86e7-4f7c-a184-00575c924519/volumes" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.548325 4682 scope.go:117] "RemoveContainer" containerID="6ecf55b44a7e6712a1fd7ed57c4328e1a18e199f6d2830e2e81128cbd8072cd5" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.665849 4682 
scope.go:117] "RemoveContainer" containerID="d816305852db1834165c36eb872791f4efd79c288a74179c3ef6f7cbe21df511" Dec 10 11:07:26 crc kubenswrapper[4682]: E1210 11:07:26.669819 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d816305852db1834165c36eb872791f4efd79c288a74179c3ef6f7cbe21df511\": container with ID starting with d816305852db1834165c36eb872791f4efd79c288a74179c3ef6f7cbe21df511 not found: ID does not exist" containerID="d816305852db1834165c36eb872791f4efd79c288a74179c3ef6f7cbe21df511" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.669918 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d816305852db1834165c36eb872791f4efd79c288a74179c3ef6f7cbe21df511"} err="failed to get container status \"d816305852db1834165c36eb872791f4efd79c288a74179c3ef6f7cbe21df511\": rpc error: code = NotFound desc = could not find container \"d816305852db1834165c36eb872791f4efd79c288a74179c3ef6f7cbe21df511\": container with ID starting with d816305852db1834165c36eb872791f4efd79c288a74179c3ef6f7cbe21df511 not found: ID does not exist" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.669975 4682 scope.go:117] "RemoveContainer" containerID="6ecf55b44a7e6712a1fd7ed57c4328e1a18e199f6d2830e2e81128cbd8072cd5" Dec 10 11:07:26 crc kubenswrapper[4682]: E1210 11:07:26.673759 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ecf55b44a7e6712a1fd7ed57c4328e1a18e199f6d2830e2e81128cbd8072cd5\": container with ID starting with 6ecf55b44a7e6712a1fd7ed57c4328e1a18e199f6d2830e2e81128cbd8072cd5 not found: ID does not exist" containerID="6ecf55b44a7e6712a1fd7ed57c4328e1a18e199f6d2830e2e81128cbd8072cd5" Dec 10 11:07:26 crc kubenswrapper[4682]: I1210 11:07:26.673803 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ecf55b44a7e6712a1fd7ed57c4328e1a18e199f6d2830e2e81128cbd8072cd5"} err="failed to get container status \"6ecf55b44a7e6712a1fd7ed57c4328e1a18e199f6d2830e2e81128cbd8072cd5\": rpc error: code = NotFound desc = could not find container \"6ecf55b44a7e6712a1fd7ed57c4328e1a18e199f6d2830e2e81128cbd8072cd5\": container with ID starting with 6ecf55b44a7e6712a1fd7ed57c4328e1a18e199f6d2830e2e81128cbd8072cd5 not found: ID does not exist" Dec 10 11:07:27 crc kubenswrapper[4682]: I1210 11:07:27.158004 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" event={"ID":"9483a109-197b-41a8-94ee-498bee3a67eb","Type":"ContainerStarted","Data":"1b597744ef7ccfe50dac0a258b9776fc9871100f64e0c5ed0d9ad8f27681f900"} Dec 10 11:07:27 crc kubenswrapper[4682]: I1210 11:07:27.158077 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:27 crc kubenswrapper[4682]: I1210 11:07:27.160304 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"011cae1f-76a2-4d73-97be-8cf2d85db880","Type":"ContainerStarted","Data":"4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3"} Dec 10 11:07:27 crc kubenswrapper[4682]: I1210 11:07:27.183495 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"bc9a199c-df24-4d39-a38d-b3ce67fba033","Type":"ContainerStarted","Data":"c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7"} Dec 10 11:07:27 crc 
kubenswrapper[4682]: I1210 11:07:27.188994 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"97c96a0f-0978-472b-b04a-6b1f0850b97c","Type":"ContainerStarted","Data":"f7d29c4e4534990c2c1ebf08c35f64ff5483a7bfbbc4ae031f298eda3e8928c8"} Dec 10 11:07:27 crc kubenswrapper[4682]: I1210 11:07:27.194938 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f7fc58469-rvhd4" event={"ID":"ec3a169e-4679-409e-a778-f88b4972abf8","Type":"ContainerStarted","Data":"ed62ce2838ca7fe4bb403ab773501ed476b8ee8f35732b242e78aebeee342329"} Dec 10 11:07:27 crc kubenswrapper[4682]: I1210 11:07:27.195006 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f7fc58469-rvhd4" event={"ID":"ec3a169e-4679-409e-a778-f88b4972abf8","Type":"ContainerStarted","Data":"09765617fe1f6009bf4f53aaa6d3d2bf3ce8fbe2ec1eedb6c2a3a9c4adccfd2c"} Dec 10 11:07:27 crc kubenswrapper[4682]: I1210 11:07:27.227521 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7f7fc58469-rvhd4" podStartSLOduration=12.227497087 podStartE2EDuration="12.227497087s" podCreationTimestamp="2025-12-10 11:07:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:27.220165267 +0000 UTC m=+1327.540376027" watchObservedRunningTime="2025-12-10 11:07:27.227497087 +0000 UTC m=+1327.547707837" Dec 10 11:07:27 crc kubenswrapper[4682]: I1210 11:07:27.233971 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" podStartSLOduration=14.233949628 podStartE2EDuration="14.233949628s" podCreationTimestamp="2025-12-10 11:07:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:27.177726907 +0000 UTC m=+1327.497937677" watchObservedRunningTime="2025-12-10 11:07:27.233949628 +0000 UTC m=+1327.554160378" Dec 10 11:07:28 crc kubenswrapper[4682]: I1210 11:07:28.212855 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"011cae1f-76a2-4d73-97be-8cf2d85db880","Type":"ContainerStarted","Data":"84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1"} Dec 10 11:07:28 crc kubenswrapper[4682]: I1210 11:07:28.212942 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="011cae1f-76a2-4d73-97be-8cf2d85db880" containerName="glance-log" containerID="cri-o://4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3" gracePeriod=30 Dec 10 11:07:28 crc kubenswrapper[4682]: I1210 11:07:28.213026 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="011cae1f-76a2-4d73-97be-8cf2d85db880" containerName="glance-httpd" containerID="cri-o://84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1" gracePeriod=30 Dec 10 11:07:28 crc kubenswrapper[4682]: I1210 11:07:28.218256 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8h57v" event={"ID":"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb","Type":"ContainerStarted","Data":"6763563e2a4caa280c3dfa595fb0f443b1e77703b9630b707d072babeac76023"} Dec 10 11:07:28 crc kubenswrapper[4682]: I1210 11:07:28.224638 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"bc9a199c-df24-4d39-a38d-b3ce67fba033","Type":"ContainerStarted","Data":"325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f"} Dec 10 11:07:28 crc kubenswrapper[4682]: I1210 11:07:28.224790 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="bc9a199c-df24-4d39-a38d-b3ce67fba033" containerName="glance-log" containerID="cri-o://c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7" gracePeriod=30 Dec 10 11:07:28 crc kubenswrapper[4682]: I1210 11:07:28.224897 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="bc9a199c-df24-4d39-a38d-b3ce67fba033" containerName="glance-httpd" containerID="cri-o://325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f" gracePeriod=30 Dec 10 11:07:28 crc kubenswrapper[4682]: I1210 11:07:28.232608 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-h5fss" event={"ID":"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54","Type":"ContainerStarted","Data":"08dcaf058345a8eba6ed267cc166bcb549b4f3967e92d25302b179639f32454f"} Dec 10 11:07:28 crc kubenswrapper[4682]: I1210 11:07:28.233832 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:28 crc kubenswrapper[4682]: I1210 11:07:28.254179 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=39.254155945 podStartE2EDuration="39.254155945s" podCreationTimestamp="2025-12-10 11:06:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:28.249026884 +0000 UTC m=+1328.569237634" watchObservedRunningTime="2025-12-10 11:07:28.254155945 +0000 UTC m=+1328.574366695" Dec 10 11:07:28 crc kubenswrapper[4682]: I1210 11:07:28.279019 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=39.279005073 podStartE2EDuration="39.279005073s" podCreationTimestamp="2025-12-10 11:06:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:28.275483572 +0000 UTC m=+1328.595694322" watchObservedRunningTime="2025-12-10 11:07:28.279005073 +0000 UTC m=+1328.599215823" Dec 10 11:07:28 crc kubenswrapper[4682]: I1210 11:07:28.314020 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-h5fss" podStartSLOduration=3.480049493 podStartE2EDuration="44.314003229s" podCreationTimestamp="2025-12-10 11:06:44 +0000 UTC" firstStartedPulling="2025-12-10 11:06:46.739847551 +0000 UTC m=+1287.060058301" lastFinishedPulling="2025-12-10 11:07:27.573801287 +0000 UTC m=+1327.894012037" observedRunningTime="2025-12-10 11:07:28.292258948 +0000 UTC m=+1328.612469728" watchObservedRunningTime="2025-12-10 11:07:28.314003229 +0000 UTC m=+1328.634213979" Dec 10 11:07:28 crc kubenswrapper[4682]: I1210 11:07:28.325342 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-8h57v" podStartSLOduration=3.236922065 podStartE2EDuration="44.325316754s" podCreationTimestamp="2025-12-10 11:06:44 +0000 UTC" firstStartedPulling="2025-12-10 11:06:45.894950948 +0000 UTC m=+1286.215161698" lastFinishedPulling="2025-12-10 11:07:26.983345637 +0000 
UTC m=+1327.303556387" observedRunningTime="2025-12-10 11:07:28.309953373 +0000 UTC m=+1328.630164203" watchObservedRunningTime="2025-12-10 11:07:28.325316754 +0000 UTC m=+1328.645527504" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.091771 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.200726 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc9a199c-df24-4d39-a38d-b3ce67fba033-logs\") pod \"bc9a199c-df24-4d39-a38d-b3ce67fba033\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.201049 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-scripts\") pod \"bc9a199c-df24-4d39-a38d-b3ce67fba033\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.201362 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") pod \"bc9a199c-df24-4d39-a38d-b3ce67fba033\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.201394 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-config-data\") pod \"bc9a199c-df24-4d39-a38d-b3ce67fba033\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.201456 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc9a199c-df24-4d39-a38d-b3ce67fba033-httpd-run\") pod \"bc9a199c-df24-4d39-a38d-b3ce67fba033\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.201616 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxrq7\" (UniqueName: \"kubernetes.io/projected/bc9a199c-df24-4d39-a38d-b3ce67fba033-kube-api-access-gxrq7\") pod \"bc9a199c-df24-4d39-a38d-b3ce67fba033\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.201712 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-combined-ca-bundle\") pod \"bc9a199c-df24-4d39-a38d-b3ce67fba033\" (UID: \"bc9a199c-df24-4d39-a38d-b3ce67fba033\") " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.202087 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc9a199c-df24-4d39-a38d-b3ce67fba033-logs" (OuterVolumeSpecName: "logs") pod "bc9a199c-df24-4d39-a38d-b3ce67fba033" (UID: "bc9a199c-df24-4d39-a38d-b3ce67fba033"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.203878 4682 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc9a199c-df24-4d39-a38d-b3ce67fba033-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.202180 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc9a199c-df24-4d39-a38d-b3ce67fba033-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "bc9a199c-df24-4d39-a38d-b3ce67fba033" (UID: "bc9a199c-df24-4d39-a38d-b3ce67fba033"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.209231 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-scripts" (OuterVolumeSpecName: "scripts") pod "bc9a199c-df24-4d39-a38d-b3ce67fba033" (UID: "bc9a199c-df24-4d39-a38d-b3ce67fba033"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.209276 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc9a199c-df24-4d39-a38d-b3ce67fba033-kube-api-access-gxrq7" (OuterVolumeSpecName: "kube-api-access-gxrq7") pod "bc9a199c-df24-4d39-a38d-b3ce67fba033" (UID: "bc9a199c-df24-4d39-a38d-b3ce67fba033"). InnerVolumeSpecName "kube-api-access-gxrq7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.222854 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.228452 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52" (OuterVolumeSpecName: "glance") pod "bc9a199c-df24-4d39-a38d-b3ce67fba033" (UID: "bc9a199c-df24-4d39-a38d-b3ce67fba033"). InnerVolumeSpecName "pvc-7787cbf6-7249-471e-a024-697e395dbc52". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.238992 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bc9a199c-df24-4d39-a38d-b3ce67fba033" (UID: "bc9a199c-df24-4d39-a38d-b3ce67fba033"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.255536 4682 generic.go:334] "Generic (PLEG): container finished" podID="c6258156-0c39-4f7b-a367-954f1eb68718" containerID="b0541aa7f9c23fb6eef1ef55937b6fa7738e2170c6eec45e4c19bbd20ca954da" exitCode=0 Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.255676 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6g6l7" event={"ID":"c6258156-0c39-4f7b-a367-954f1eb68718","Type":"ContainerDied","Data":"b0541aa7f9c23fb6eef1ef55937b6fa7738e2170c6eec45e4c19bbd20ca954da"} Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.260902 4682 generic.go:334] "Generic (PLEG): container finished" podID="011cae1f-76a2-4d73-97be-8cf2d85db880" containerID="84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1" exitCode=0 Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.260935 4682 generic.go:334] "Generic (PLEG): container finished" podID="011cae1f-76a2-4d73-97be-8cf2d85db880" containerID="4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3" exitCode=143 Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.261010 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"011cae1f-76a2-4d73-97be-8cf2d85db880","Type":"ContainerDied","Data":"84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1"} Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.261040 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"011cae1f-76a2-4d73-97be-8cf2d85db880","Type":"ContainerDied","Data":"4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3"} Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.261053 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"011cae1f-76a2-4d73-97be-8cf2d85db880","Type":"ContainerDied","Data":"6f152783e58b9bfa0334c22ec78446c7bb0801920dee7428ef3bbaed7deb8614"} Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.261086 4682 scope.go:117] "RemoveContainer" containerID="84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.261234 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.288209 4682 generic.go:334] "Generic (PLEG): container finished" podID="bc9a199c-df24-4d39-a38d-b3ce67fba033" containerID="325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f" exitCode=0 Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.288235 4682 generic.go:334] "Generic (PLEG): container finished" podID="bc9a199c-df24-4d39-a38d-b3ce67fba033" containerID="c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7" exitCode=143 Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.288362 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.289461 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"bc9a199c-df24-4d39-a38d-b3ce67fba033","Type":"ContainerDied","Data":"325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f"} Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.289503 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"bc9a199c-df24-4d39-a38d-b3ce67fba033","Type":"ContainerDied","Data":"c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7"} Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.289517 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"bc9a199c-df24-4d39-a38d-b3ce67fba033","Type":"ContainerDied","Data":"13c1344d9073e387a39645eb6cd530b5ae9bc7607d27395af20f540243692149"} Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.302311 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-config-data" (OuterVolumeSpecName: "config-data") pod "bc9a199c-df24-4d39-a38d-b3ce67fba033" (UID: "bc9a199c-df24-4d39-a38d-b3ce67fba033"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.320192 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-scripts\") pod \"011cae1f-76a2-4d73-97be-8cf2d85db880\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.320328 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-config-data\") pod \"011cae1f-76a2-4d73-97be-8cf2d85db880\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.320359 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj6h9\" (UniqueName: \"kubernetes.io/projected/011cae1f-76a2-4d73-97be-8cf2d85db880-kube-api-access-pj6h9\") pod \"011cae1f-76a2-4d73-97be-8cf2d85db880\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.320425 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/011cae1f-76a2-4d73-97be-8cf2d85db880-httpd-run\") pod \"011cae1f-76a2-4d73-97be-8cf2d85db880\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.320502 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/011cae1f-76a2-4d73-97be-8cf2d85db880-logs\") pod \"011cae1f-76a2-4d73-97be-8cf2d85db880\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.320636 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") pod \"011cae1f-76a2-4d73-97be-8cf2d85db880\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " Dec 10 11:07:29 crc 
kubenswrapper[4682]: I1210 11:07:29.320692 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-combined-ca-bundle\") pod \"011cae1f-76a2-4d73-97be-8cf2d85db880\" (UID: \"011cae1f-76a2-4d73-97be-8cf2d85db880\") " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.321147 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/011cae1f-76a2-4d73-97be-8cf2d85db880-logs" (OuterVolumeSpecName: "logs") pod "011cae1f-76a2-4d73-97be-8cf2d85db880" (UID: "011cae1f-76a2-4d73-97be-8cf2d85db880"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.321238 4682 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc9a199c-df24-4d39-a38d-b3ce67fba033-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.321257 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxrq7\" (UniqueName: \"kubernetes.io/projected/bc9a199c-df24-4d39-a38d-b3ce67fba033-kube-api-access-gxrq7\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.321270 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.321282 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.321308 4682 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") on node \"crc\" " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.321321 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc9a199c-df24-4d39-a38d-b3ce67fba033-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.321368 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/011cae1f-76a2-4d73-97be-8cf2d85db880-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "011cae1f-76a2-4d73-97be-8cf2d85db880" (UID: "011cae1f-76a2-4d73-97be-8cf2d85db880"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.324623 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-scripts" (OuterVolumeSpecName: "scripts") pod "011cae1f-76a2-4d73-97be-8cf2d85db880" (UID: "011cae1f-76a2-4d73-97be-8cf2d85db880"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.338726 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/011cae1f-76a2-4d73-97be-8cf2d85db880-kube-api-access-pj6h9" (OuterVolumeSpecName: "kube-api-access-pj6h9") pod "011cae1f-76a2-4d73-97be-8cf2d85db880" (UID: "011cae1f-76a2-4d73-97be-8cf2d85db880"). InnerVolumeSpecName "kube-api-access-pj6h9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.338820 4682 scope.go:117] "RemoveContainer" containerID="4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.351569 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9" (OuterVolumeSpecName: "glance") pod "011cae1f-76a2-4d73-97be-8cf2d85db880" (UID: "011cae1f-76a2-4d73-97be-8cf2d85db880"). InnerVolumeSpecName "pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.372092 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "011cae1f-76a2-4d73-97be-8cf2d85db880" (UID: "011cae1f-76a2-4d73-97be-8cf2d85db880"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.393721 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-config-data" (OuterVolumeSpecName: "config-data") pod "011cae1f-76a2-4d73-97be-8cf2d85db880" (UID: "011cae1f-76a2-4d73-97be-8cf2d85db880"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.401697 4682 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.401894 4682 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-7787cbf6-7249-471e-a024-697e395dbc52" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52") on node "crc" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.423296 4682 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/011cae1f-76a2-4d73-97be-8cf2d85db880-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.423341 4682 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/011cae1f-76a2-4d73-97be-8cf2d85db880-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.423366 4682 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") on node \"crc\" " Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.423382 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.423396 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.423408 4682 reconciler_common.go:293] "Volume detached for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.423420 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/011cae1f-76a2-4d73-97be-8cf2d85db880-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.423431 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj6h9\" (UniqueName: \"kubernetes.io/projected/011cae1f-76a2-4d73-97be-8cf2d85db880-kube-api-access-pj6h9\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.458866 4682 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.459015 4682 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9") on node "crc" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.498653 4682 scope.go:117] "RemoveContainer" containerID="84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1" Dec 10 11:07:29 crc kubenswrapper[4682]: E1210 11:07:29.499455 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1\": container with ID starting with 84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1 not found: ID does not exist" containerID="84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.499513 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1"} err="failed to get container status \"84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1\": rpc error: code = NotFound desc = could not find container \"84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1\": container with ID starting with 84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1 not found: ID does not exist" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.499560 4682 scope.go:117] "RemoveContainer" containerID="4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3" Dec 10 11:07:29 crc kubenswrapper[4682]: E1210 11:07:29.500099 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3\": container with ID starting with 4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3 not found: ID does not exist" containerID="4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.500152 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3"} err="failed to get container status \"4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3\": rpc error: code = NotFound desc = could not find container \"4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3\": container with ID starting with 4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3 not found: ID does not exist" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.500183 4682 scope.go:117] "RemoveContainer" containerID="84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.500492 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1"} err="failed to get container status \"84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1\": rpc error: code = NotFound desc = could not find container \"84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1\": container with ID starting with 84180c7c3de66dc42f0bb2888df2d77755f20fb2cc8e566bea815a35ccb73df1 not found: ID does not exist" Dec 10 
11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.500510 4682 scope.go:117] "RemoveContainer" containerID="4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.500834 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3"} err="failed to get container status \"4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3\": rpc error: code = NotFound desc = could not find container \"4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3\": container with ID starting with 4862c57b46d00348dafa6a46feafee495ecb554296c9c9c215c4db46b2664da3 not found: ID does not exist" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.500862 4682 scope.go:117] "RemoveContainer" containerID="325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.526812 4682 reconciler_common.go:293] "Volume detached for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.550232 4682 scope.go:117] "RemoveContainer" containerID="c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.599838 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.618731 4682 scope.go:117] "RemoveContainer" containerID="325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.620273 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:07:29 crc kubenswrapper[4682]: E1210 11:07:29.621488 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f\": container with ID starting with 325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f not found: ID does not exist" containerID="325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.621541 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f"} err="failed to get container status \"325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f\": rpc error: code = NotFound desc = could not find container \"325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f\": container with ID starting with 325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f not found: ID does not exist" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.621574 4682 scope.go:117] "RemoveContainer" containerID="c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7" Dec 10 11:07:29 crc kubenswrapper[4682]: E1210 11:07:29.634886 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7\": container with ID starting with c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7 not found: ID does 
not exist" containerID="c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.634947 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7"} err="failed to get container status \"c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7\": rpc error: code = NotFound desc = could not find container \"c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7\": container with ID starting with c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7 not found: ID does not exist" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.634973 4682 scope.go:117] "RemoveContainer" containerID="325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.635448 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f"} err="failed to get container status \"325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f\": rpc error: code = NotFound desc = could not find container \"325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f\": container with ID starting with 325a8ec7b36aa30dd565c06be1da47f1e79cdc4ee601530d820f2be601f4838f not found: ID does not exist" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.635485 4682 scope.go:117] "RemoveContainer" containerID="c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.635815 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7"} err="failed to get container status \"c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7\": rpc error: code = NotFound desc = could not find container \"c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7\": container with ID starting with c08a2d75520d21729715b94f14d53afae42fc51bc63a50b9f4635a9d8268aea7 not found: ID does not exist" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.639437 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:07:29 crc kubenswrapper[4682]: E1210 11:07:29.639978 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc9a199c-df24-4d39-a38d-b3ce67fba033" containerName="glance-httpd" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.640003 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc9a199c-df24-4d39-a38d-b3ce67fba033" containerName="glance-httpd" Dec 10 11:07:29 crc kubenswrapper[4682]: E1210 11:07:29.640018 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="011cae1f-76a2-4d73-97be-8cf2d85db880" containerName="glance-httpd" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.640027 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="011cae1f-76a2-4d73-97be-8cf2d85db880" containerName="glance-httpd" Dec 10 11:07:29 crc kubenswrapper[4682]: E1210 11:07:29.640040 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc9a199c-df24-4d39-a38d-b3ce67fba033" containerName="glance-log" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.640048 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc9a199c-df24-4d39-a38d-b3ce67fba033" 
containerName="glance-log" Dec 10 11:07:29 crc kubenswrapper[4682]: E1210 11:07:29.640072 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c43ce515-86e7-4f7c-a184-00575c924519" containerName="init" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.640079 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c43ce515-86e7-4f7c-a184-00575c924519" containerName="init" Dec 10 11:07:29 crc kubenswrapper[4682]: E1210 11:07:29.640097 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c43ce515-86e7-4f7c-a184-00575c924519" containerName="dnsmasq-dns" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.640104 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c43ce515-86e7-4f7c-a184-00575c924519" containerName="dnsmasq-dns" Dec 10 11:07:29 crc kubenswrapper[4682]: E1210 11:07:29.640123 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="011cae1f-76a2-4d73-97be-8cf2d85db880" containerName="glance-log" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.640131 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="011cae1f-76a2-4d73-97be-8cf2d85db880" containerName="glance-log" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.640380 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc9a199c-df24-4d39-a38d-b3ce67fba033" containerName="glance-httpd" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.640396 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc9a199c-df24-4d39-a38d-b3ce67fba033" containerName="glance-log" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.640407 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="011cae1f-76a2-4d73-97be-8cf2d85db880" containerName="glance-httpd" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.640420 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c43ce515-86e7-4f7c-a184-00575c924519" containerName="dnsmasq-dns" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.640434 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="011cae1f-76a2-4d73-97be-8cf2d85db880" containerName="glance-log" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.642683 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.646088 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.647132 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-qm2p5" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.647374 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.651519 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.655753 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.667694 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.699586 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.719584 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.721221 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.726076 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.726379 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.730249 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.730319 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a8f8fbad-ac9b-4103-8370-9693c234d655-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.730357 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f8fbad-ac9b-4103-8370-9693c234d655-logs\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.730416 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fng64\" (UniqueName: \"kubernetes.io/projected/a8f8fbad-ac9b-4103-8370-9693c234d655-kube-api-access-fng64\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " 
pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.730501 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-config-data\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.730548 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.730592 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-scripts\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.730651 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.751756 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.832717 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.832779 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-config-data\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.832819 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.832838 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc 
kubenswrapper[4682]: I1210 11:07:29.832859 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a8f8fbad-ac9b-4103-8370-9693c234d655-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.832882 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f8fbad-ac9b-4103-8370-9693c234d655-logs\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.832926 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.832966 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fng64\" (UniqueName: \"kubernetes.io/projected/a8f8fbad-ac9b-4103-8370-9693c234d655-kube-api-access-fng64\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.833032 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-config-data\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.833071 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-scripts\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.833099 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.833116 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/72889b73-e773-4559-800e-a87032e35a05-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.833170 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-scripts\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc 
kubenswrapper[4682]: I1210 11:07:29.833212 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4tsr\" (UniqueName: \"kubernetes.io/projected/72889b73-e773-4559-800e-a87032e35a05-kube-api-access-l4tsr\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.833261 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.833288 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/72889b73-e773-4559-800e-a87032e35a05-logs\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.834974 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a8f8fbad-ac9b-4103-8370-9693c234d655-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.836818 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f8fbad-ac9b-4103-8370-9693c234d655-logs\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.840194 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.842718 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.843362 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-scripts\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.850457 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-config-data\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.850587 4682 csi_attacher.go:380] kubernetes.io/csi: 
attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.850724 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/7b1923bf8bb403c24020ed876074f9fa5ba6aaf35e09637f2443da6ac1e5868a/globalmount\"" pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.851257 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fng64\" (UniqueName: \"kubernetes.io/projected/a8f8fbad-ac9b-4103-8370-9693c234d655-kube-api-access-fng64\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.914918 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") pod \"glance-default-external-api-0\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.934531 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4tsr\" (UniqueName: \"kubernetes.io/projected/72889b73-e773-4559-800e-a87032e35a05-kube-api-access-l4tsr\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.934586 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/72889b73-e773-4559-800e-a87032e35a05-logs\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.934628 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-config-data\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.934661 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.934680 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 
11:07:29.934703 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.934770 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-scripts\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.934793 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/72889b73-e773-4559-800e-a87032e35a05-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.935234 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/72889b73-e773-4559-800e-a87032e35a05-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.937005 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/72889b73-e773-4559-800e-a87032e35a05-logs\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.943339 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.943597 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-config-data\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.944144 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-scripts\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.944675 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.944706 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/eee35181b01d2e9acbd6a7670c690b29128fb0f1ac4a3b3e7ea6260a2e4780e5/globalmount\"" pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.953557 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.959719 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4tsr\" (UniqueName: \"kubernetes.io/projected/72889b73-e773-4559-800e-a87032e35a05-kube-api-access-l4tsr\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.972997 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:07:29 crc kubenswrapper[4682]: I1210 11:07:29.986991 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") pod \"glance-default-internal-api-0\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:07:30 crc kubenswrapper[4682]: I1210 11:07:30.038272 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:07:30 crc kubenswrapper[4682]: I1210 11:07:30.411958 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="011cae1f-76a2-4d73-97be-8cf2d85db880" path="/var/lib/kubelet/pods/011cae1f-76a2-4d73-97be-8cf2d85db880/volumes" Dec 10 11:07:30 crc kubenswrapper[4682]: I1210 11:07:30.414417 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc9a199c-df24-4d39-a38d-b3ce67fba033" path="/var/lib/kubelet/pods/bc9a199c-df24-4d39-a38d-b3ce67fba033/volumes" Dec 10 11:07:30 crc kubenswrapper[4682]: I1210 11:07:30.561054 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:07:30 crc kubenswrapper[4682]: I1210 11:07:30.717226 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:07:31 crc kubenswrapper[4682]: I1210 11:07:31.335388 4682 generic.go:334] "Generic (PLEG): container finished" podID="3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2" containerID="d44450d7db3b3f08590d80b491c4dc6dc1ef11723c687adada13904ae092f553" exitCode=0 Dec 10 11:07:31 crc kubenswrapper[4682]: I1210 11:07:31.335451 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-ppzrh" event={"ID":"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2","Type":"ContainerDied","Data":"d44450d7db3b3f08590d80b491c4dc6dc1ef11723c687adada13904ae092f553"} Dec 10 11:07:31 crc kubenswrapper[4682]: I1210 11:07:31.338849 4682 generic.go:334] "Generic (PLEG): container finished" podID="8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54" containerID="08dcaf058345a8eba6ed267cc166bcb549b4f3967e92d25302b179639f32454f" exitCode=0 Dec 10 11:07:31 crc kubenswrapper[4682]: I1210 11:07:31.338886 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-h5fss" event={"ID":"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54","Type":"ContainerDied","Data":"08dcaf058345a8eba6ed267cc166bcb549b4f3967e92d25302b179639f32454f"} Dec 10 11:07:34 crc kubenswrapper[4682]: I1210 11:07:34.169644 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:34 crc kubenswrapper[4682]: I1210 11:07:34.299206 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-clb7f"] Dec 10 11:07:34 crc kubenswrapper[4682]: I1210 11:07:34.299429 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" podUID="92ecd66f-e6fa-4be2-b61c-38fa89fb015f" containerName="dnsmasq-dns" containerID="cri-o://c742871a2c0d5f66fbc3ab0a2aef5b3c2eefd2ab03c4b9dea0a1dd5f2d9fe256" gracePeriod=10 Dec 10 11:07:35 crc kubenswrapper[4682]: I1210 11:07:35.377844 4682 generic.go:334] "Generic (PLEG): container finished" podID="92ecd66f-e6fa-4be2-b61c-38fa89fb015f" containerID="c742871a2c0d5f66fbc3ab0a2aef5b3c2eefd2ab03c4b9dea0a1dd5f2d9fe256" exitCode=0 Dec 10 11:07:35 crc kubenswrapper[4682]: I1210 11:07:35.377880 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" event={"ID":"92ecd66f-e6fa-4be2-b61c-38fa89fb015f","Type":"ContainerDied","Data":"c742871a2c0d5f66fbc3ab0a2aef5b3c2eefd2ab03c4b9dea0a1dd5f2d9fe256"} Dec 10 11:07:35 crc kubenswrapper[4682]: I1210 11:07:35.382083 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.013218 4682 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-h5fss" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.015594 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-6g6l7" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.021058 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.067956 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-config-data\") pod \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.068006 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6258156-0c39-4f7b-a367-954f1eb68718-combined-ca-bundle\") pod \"c6258156-0c39-4f7b-a367-954f1eb68718\" (UID: \"c6258156-0c39-4f7b-a367-954f1eb68718\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.068116 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-scripts\") pod \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.068140 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c6258156-0c39-4f7b-a367-954f1eb68718-db-sync-config-data\") pod \"c6258156-0c39-4f7b-a367-954f1eb68718\" (UID: \"c6258156-0c39-4f7b-a367-954f1eb68718\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.068196 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-logs\") pod \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.068222 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-combined-ca-bundle\") pod \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.068338 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p647q\" (UniqueName: \"kubernetes.io/projected/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-kube-api-access-p647q\") pod \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.068384 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cbcqm\" (UniqueName: \"kubernetes.io/projected/c6258156-0c39-4f7b-a367-954f1eb68718-kube-api-access-cbcqm\") pod \"c6258156-0c39-4f7b-a367-954f1eb68718\" (UID: \"c6258156-0c39-4f7b-a367-954f1eb68718\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.068402 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-credential-keys\") 
pod \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.068444 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-scripts\") pod \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.068492 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-config-data\") pod \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.068511 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-combined-ca-bundle\") pod \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\" (UID: \"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.068560 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-fernet-keys\") pod \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.068591 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9dvj\" (UniqueName: \"kubernetes.io/projected/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-kube-api-access-n9dvj\") pod \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\" (UID: \"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.072112 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-logs" (OuterVolumeSpecName: "logs") pod "8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54" (UID: "8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.086169 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-kube-api-access-n9dvj" (OuterVolumeSpecName: "kube-api-access-n9dvj") pod "3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2" (UID: "3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2"). InnerVolumeSpecName "kube-api-access-n9dvj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.086581 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-scripts" (OuterVolumeSpecName: "scripts") pod "3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2" (UID: "3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.094431 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6258156-0c39-4f7b-a367-954f1eb68718-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c6258156-0c39-4f7b-a367-954f1eb68718" (UID: "c6258156-0c39-4f7b-a367-954f1eb68718"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.096711 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6258156-0c39-4f7b-a367-954f1eb68718-kube-api-access-cbcqm" (OuterVolumeSpecName: "kube-api-access-cbcqm") pod "c6258156-0c39-4f7b-a367-954f1eb68718" (UID: "c6258156-0c39-4f7b-a367-954f1eb68718"). InnerVolumeSpecName "kube-api-access-cbcqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.098658 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2" (UID: "3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.100672 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2" (UID: "3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.109772 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-scripts" (OuterVolumeSpecName: "scripts") pod "8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54" (UID: "8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.109984 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-kube-api-access-p647q" (OuterVolumeSpecName: "kube-api-access-p647q") pod "8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54" (UID: "8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54"). InnerVolumeSpecName "kube-api-access-p647q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.171893 4682 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.171922 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9dvj\" (UniqueName: \"kubernetes.io/projected/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-kube-api-access-n9dvj\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.171934 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.171945 4682 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c6258156-0c39-4f7b-a367-954f1eb68718-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.171961 4682 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.171970 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p647q\" (UniqueName: \"kubernetes.io/projected/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-kube-api-access-p647q\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.171983 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cbcqm\" (UniqueName: \"kubernetes.io/projected/c6258156-0c39-4f7b-a367-954f1eb68718-kube-api-access-cbcqm\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.171993 4682 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-credential-keys\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.172002 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.172439 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6258156-0c39-4f7b-a367-954f1eb68718-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c6258156-0c39-4f7b-a367-954f1eb68718" (UID: "c6258156-0c39-4f7b-a367-954f1eb68718"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.181438 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2" (UID: "3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.234050 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-config-data" (OuterVolumeSpecName: "config-data") pod "8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54" (UID: "8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.235888 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-config-data" (OuterVolumeSpecName: "config-data") pod "3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2" (UID: "3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.256400 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54" (UID: "8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.275342 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.275395 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.275405 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.275434 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6258156-0c39-4f7b-a367-954f1eb68718-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.275443 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.413895 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"72889b73-e773-4559-800e-a87032e35a05","Type":"ContainerStarted","Data":"512585cef0ad0eb65ff7e42f576a8fa48fc2e6da2bd9b2a7d1bb4eff98b273fc"} Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.416806 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" event={"ID":"92ecd66f-e6fa-4be2-b61c-38fa89fb015f","Type":"ContainerDied","Data":"ba33ac53f73497edb022300f889eb8515f9a17e62005725ab4a82c7258e57554"} Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.416849 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba33ac53f73497edb022300f889eb8515f9a17e62005725ab4a82c7258e57554" Dec 10 11:07:36 
crc kubenswrapper[4682]: I1210 11:07:36.421972 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-ppzrh" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.422556 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-ppzrh" event={"ID":"3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2","Type":"ContainerDied","Data":"4d17f701062cd814bd7cb3de82d7a6b635d08a10088f547d8fc65fdd15031955"} Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.422601 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d17f701062cd814bd7cb3de82d7a6b635d08a10088f547d8fc65fdd15031955" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.428181 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.443042 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-h5fss" event={"ID":"8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54","Type":"ContainerDied","Data":"f7c838cc1cf437d79717bbc246a17a2ef34e73d3a0224dfeefe967ad7869aa81"} Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.443091 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7c838cc1cf437d79717bbc246a17a2ef34e73d3a0224dfeefe967ad7869aa81" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.443180 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-h5fss" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.456111 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-6g6l7" event={"ID":"c6258156-0c39-4f7b-a367-954f1eb68718","Type":"ContainerDied","Data":"03c7691688f184222240ac0b59ca9becb93b1acfe0a9c71897e08a56ac829fab"} Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.456347 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03c7691688f184222240ac0b59ca9becb93b1acfe0a9c71897e08a56ac829fab" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.456503 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-6g6l7" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.480640 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8p78\" (UniqueName: \"kubernetes.io/projected/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-kube-api-access-z8p78\") pod \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.481043 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-ovsdbserver-sb\") pod \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.481272 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-config\") pod \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.481378 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-dns-svc\") pod \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.481445 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-ovsdbserver-nb\") pod \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\" (UID: \"92ecd66f-e6fa-4be2-b61c-38fa89fb015f\") " Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.480977 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.485885 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.486632 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a8f8fbad-ac9b-4103-8370-9693c234d655","Type":"ContainerStarted","Data":"4f578b25f7d27ccbee2e222c3b295d0d7d903b077a29e88583177ae93e56129b"} Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.487559 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-kube-api-access-z8p78" (OuterVolumeSpecName: "kube-api-access-z8p78") pod "92ecd66f-e6fa-4be2-b61c-38fa89fb015f" (UID: "92ecd66f-e6fa-4be2-b61c-38fa89fb015f"). InnerVolumeSpecName "kube-api-access-z8p78". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.584219 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8p78\" (UniqueName: \"kubernetes.io/projected/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-kube-api-access-z8p78\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.633783 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-config" (OuterVolumeSpecName: "config") pod "92ecd66f-e6fa-4be2-b61c-38fa89fb015f" (UID: "92ecd66f-e6fa-4be2-b61c-38fa89fb015f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.639610 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "92ecd66f-e6fa-4be2-b61c-38fa89fb015f" (UID: "92ecd66f-e6fa-4be2-b61c-38fa89fb015f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.678969 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "92ecd66f-e6fa-4be2-b61c-38fa89fb015f" (UID: "92ecd66f-e6fa-4be2-b61c-38fa89fb015f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.682594 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "92ecd66f-e6fa-4be2-b61c-38fa89fb015f" (UID: "92ecd66f-e6fa-4be2-b61c-38fa89fb015f"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.686798 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.686834 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.686844 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:36 crc kubenswrapper[4682]: I1210 11:07:36.686854 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92ecd66f-e6fa-4be2-b61c-38fa89fb015f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.183358 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-6548f86b64-snz6f"] Dec 10 11:07:37 crc kubenswrapper[4682]: E1210 11:07:37.185082 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92ecd66f-e6fa-4be2-b61c-38fa89fb015f" containerName="dnsmasq-dns" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.185101 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="92ecd66f-e6fa-4be2-b61c-38fa89fb015f" containerName="dnsmasq-dns" Dec 10 11:07:37 crc kubenswrapper[4682]: E1210 11:07:37.185155 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92ecd66f-e6fa-4be2-b61c-38fa89fb015f" containerName="init" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.185161 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="92ecd66f-e6fa-4be2-b61c-38fa89fb015f" containerName="init" Dec 10 11:07:37 crc kubenswrapper[4682]: E1210 11:07:37.185172 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54" containerName="placement-db-sync" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.185179 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54" containerName="placement-db-sync" Dec 10 11:07:37 crc kubenswrapper[4682]: E1210 11:07:37.185188 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2" containerName="keystone-bootstrap" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.185194 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2" containerName="keystone-bootstrap" Dec 10 11:07:37 crc kubenswrapper[4682]: E1210 11:07:37.185217 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6258156-0c39-4f7b-a367-954f1eb68718" containerName="barbican-db-sync" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.185223 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6258156-0c39-4f7b-a367-954f1eb68718" containerName="barbican-db-sync" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.185403 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6258156-0c39-4f7b-a367-954f1eb68718" containerName="barbican-db-sync" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.185419 4682 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54" containerName="placement-db-sync" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.185430 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="92ecd66f-e6fa-4be2-b61c-38fa89fb015f" containerName="dnsmasq-dns" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.185442 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2" containerName="keystone-bootstrap" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.186522 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.193645 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.193928 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.194094 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.194201 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-slbcz" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.194306 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.223283 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6548f86b64-snz6f"] Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.268708 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-548d5df8d4-8fcdl"] Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.270008 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.276122 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.276368 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.276523 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.276671 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-44cvq" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.276844 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.276981 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.279531 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-548d5df8d4-8fcdl"] Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.300857 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/846c1791-e576-402b-b8f1-2222c7dd6c4b-logs\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.300925 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrzb6\" (UniqueName: \"kubernetes.io/projected/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-kube-api-access-mrzb6\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.300964 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-config-data\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.300992 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-scripts\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.301056 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-combined-ca-bundle\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.301078 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cd8q7\" (UniqueName: \"kubernetes.io/projected/846c1791-e576-402b-b8f1-2222c7dd6c4b-kube-api-access-cd8q7\") pod \"placement-6548f86b64-snz6f\" (UID: 
\"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.301096 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-scripts\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.301121 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-public-tls-certs\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.301148 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-combined-ca-bundle\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.301186 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-public-tls-certs\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.301218 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-internal-tls-certs\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.301247 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-fernet-keys\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.301275 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-credential-keys\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.301304 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-internal-tls-certs\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.301339 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-config-data\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.374771 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-84898968bc-8j5tc"] Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.376610 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.384751 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-84898968bc-8j5tc"] Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.402519 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-credential-keys\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.402561 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-internal-tls-certs\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.404799 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-config-data\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.404888 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/846c1791-e576-402b-b8f1-2222c7dd6c4b-logs\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.404913 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6t9rj\" (UniqueName: \"kubernetes.io/projected/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-kube-api-access-6t9rj\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: \"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.404986 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-config-data\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: \"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405006 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrzb6\" (UniqueName: \"kubernetes.io/projected/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-kube-api-access-mrzb6\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405075 4682 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-config-data\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405127 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-scripts\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405216 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-config-data-custom\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: \"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405289 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-combined-ca-bundle\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405311 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cd8q7\" (UniqueName: \"kubernetes.io/projected/846c1791-e576-402b-b8f1-2222c7dd6c4b-kube-api-access-cd8q7\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405325 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-scripts\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405350 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-public-tls-certs\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405374 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-combined-ca-bundle\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405413 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-public-tls-certs\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405443 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-combined-ca-bundle\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: \"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405464 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-internal-tls-certs\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405508 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-logs\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: \"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405526 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-fernet-keys\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.405927 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/846c1791-e576-402b-b8f1-2222c7dd6c4b-logs\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.407570 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6b5dc89858-bj5qb"] Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.407591 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-credential-keys\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.409242 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-internal-tls-certs\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.416618 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-internal-tls-certs\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.421319 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-combined-ca-bundle\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.421527 4682 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-fernet-keys\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.422316 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-public-tls-certs\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.422510 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-scripts\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.422695 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-config-data\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.424240 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-public-tls-certs\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.430357 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.430437 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.431812 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-d46pv" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.438499 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.440879 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-scripts\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.441542 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-config-data\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.445813 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.452548 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6b5dc89858-bj5qb"] Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.459586 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/846c1791-e576-402b-b8f1-2222c7dd6c4b-combined-ca-bundle\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.498372 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cd8q7\" (UniqueName: \"kubernetes.io/projected/846c1791-e576-402b-b8f1-2222c7dd6c4b-kube-api-access-cd8q7\") pod \"placement-6548f86b64-snz6f\" (UID: \"846c1791-e576-402b-b8f1-2222c7dd6c4b\") " pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.508229 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-combined-ca-bundle\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.508510 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-config-data-custom\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: \"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.508730 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-logs\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.508906 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-combined-ca-bundle\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: 
\"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.509018 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-logs\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: \"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.509146 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-config-data-custom\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.509303 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-config-data\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.509457 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6t9rj\" (UniqueName: \"kubernetes.io/projected/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-kube-api-access-6t9rj\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: \"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.509617 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-config-data\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: \"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.509758 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn4vk\" (UniqueName: \"kubernetes.io/projected/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-kube-api-access-xn4vk\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.509758 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrzb6\" (UniqueName: \"kubernetes.io/projected/66f5310d-7fea-4c01-8fe5-fe6ec16b3c68-kube-api-access-mrzb6\") pod \"keystone-548d5df8d4-8fcdl\" (UID: \"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68\") " pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.512072 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-logs\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: \"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.516821 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-config-data\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: \"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.517458 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.518293 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-config-data-custom\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: \"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.525243 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-combined-ca-bundle\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: \"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.641237 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6t9rj\" (UniqueName: \"kubernetes.io/projected/8b0a3dc4-5e16-4425-b932-e58a3cd2295a-kube-api-access-6t9rj\") pod \"barbican-worker-84898968bc-8j5tc\" (UID: \"8b0a3dc4-5e16-4425-b932-e58a3cd2295a\") " pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.641902 4682 generic.go:334] "Generic (PLEG): container finished" podID="382d9ec8-5a3b-47b3-a301-955c7e2a4ecb" containerID="6763563e2a4caa280c3dfa595fb0f443b1e77703b9630b707d072babeac76023" exitCode=0 Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.641984 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8h57v" event={"ID":"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb","Type":"ContainerDied","Data":"6763563e2a4caa280c3dfa595fb0f443b1e77703b9630b707d072babeac76023"} Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.643396 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-config-data-custom\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.643449 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-config-data\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.643556 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn4vk\" (UniqueName: \"kubernetes.io/projected/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-kube-api-access-xn4vk\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.643587 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-combined-ca-bundle\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.643634 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-logs\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.643975 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-logs\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.645637 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-z67lh"] Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.647250 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.649063 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.650229 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-config-data-custom\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.672846 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-config-data\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.677074 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-combined-ca-bundle\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.682848 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-sync-9q89f" event={"ID":"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d","Type":"ContainerStarted","Data":"cb98015b0a07b29eeb84dcd3e32e36e10eb4cbba2c2f5efe9c63e6094c517ac9"} Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.702100 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a8f8fbad-ac9b-4103-8370-9693c234d655","Type":"ContainerStarted","Data":"841f903c864af41eb790a2eeb05e5894e8a041bc91027390add3e985943784a9"} Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.705801 4682 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-z67lh"] Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.712515 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"97c96a0f-0978-472b-b04a-6b1f0850b97c","Type":"ContainerStarted","Data":"214b1a88f0e22226c669b12ca77adfb8e5040a5885d9ec8d6c54c4d7f8e575b3"} Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.728256 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn4vk\" (UniqueName: \"kubernetes.io/projected/4f04d5f7-0e27-4de0-83c3-10a07dcbc97d-kube-api-access-xn4vk\") pod \"barbican-keystone-listener-6b5dc89858-bj5qb\" (UID: \"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d\") " pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.741752 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-clb7f" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.753610 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"72889b73-e773-4559-800e-a87032e35a05","Type":"ContainerStarted","Data":"bbb7dbed30b8d9ae6765192592f9188ce3c230c04639e698ed66bbb3655bdcaa"} Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.757374 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-db-sync-9q89f" podStartSLOduration=3.233961254 podStartE2EDuration="52.757355155s" podCreationTimestamp="2025-12-10 11:06:45 +0000 UTC" firstStartedPulling="2025-12-10 11:06:46.541670171 +0000 UTC m=+1286.861880921" lastFinishedPulling="2025-12-10 11:07:36.065064072 +0000 UTC m=+1336.385274822" observedRunningTime="2025-12-10 11:07:37.756145408 +0000 UTC m=+1338.076356188" watchObservedRunningTime="2025-12-10 11:07:37.757355155 +0000 UTC m=+1338.077565905" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.758342 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.758423 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-config\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.758536 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.758727 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p64w4\" (UniqueName: \"kubernetes.io/projected/5abd16ef-83c7-4056-814c-c6937613ccce-kube-api-access-p64w4\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " 
pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.760316 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-dns-svc\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.760363 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.856819 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-84898968bc-8j5tc" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.899844 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p64w4\" (UniqueName: \"kubernetes.io/projected/5abd16ef-83c7-4056-814c-c6937613ccce-kube-api-access-p64w4\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.899944 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-dns-svc\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.899969 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.900154 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.900228 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-config\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.900311 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.901201 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.901774 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-dns-svc\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.902263 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.902675 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.903199 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-config\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.929187 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.935057 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-clb7f"] Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.940743 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p64w4\" (UniqueName: \"kubernetes.io/projected/5abd16ef-83c7-4056-814c-c6937613ccce-kube-api-access-p64w4\") pod \"dnsmasq-dns-688c87cc99-z67lh\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.983209 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:37 crc kubenswrapper[4682]: I1210 11:07:37.983904 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-clb7f"] Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.025532 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7b5d4b7b66-h8tzl"] Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.027456 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.030994 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.040001 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7b5d4b7b66-h8tzl"] Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.106786 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-config-data-custom\") pod \"barbican-api-7b5d4b7b66-h8tzl\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.106840 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-combined-ca-bundle\") pod \"barbican-api-7b5d4b7b66-h8tzl\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.106875 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-logs\") pod \"barbican-api-7b5d4b7b66-h8tzl\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.106900 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-config-data\") pod \"barbican-api-7b5d4b7b66-h8tzl\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.107007 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nl4h6\" (UniqueName: \"kubernetes.io/projected/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-kube-api-access-nl4h6\") pod \"barbican-api-7b5d4b7b66-h8tzl\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.213177 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nl4h6\" (UniqueName: \"kubernetes.io/projected/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-kube-api-access-nl4h6\") pod \"barbican-api-7b5d4b7b66-h8tzl\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.218657 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-config-data-custom\") pod \"barbican-api-7b5d4b7b66-h8tzl\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.218712 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-combined-ca-bundle\") pod \"barbican-api-7b5d4b7b66-h8tzl\" 
(UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.218753 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-logs\") pod \"barbican-api-7b5d4b7b66-h8tzl\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.218776 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-config-data\") pod \"barbican-api-7b5d4b7b66-h8tzl\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.220044 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-logs\") pod \"barbican-api-7b5d4b7b66-h8tzl\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.232595 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-config-data-custom\") pod \"barbican-api-7b5d4b7b66-h8tzl\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.256142 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nl4h6\" (UniqueName: \"kubernetes.io/projected/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-kube-api-access-nl4h6\") pod \"barbican-api-7b5d4b7b66-h8tzl\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.256444 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-combined-ca-bundle\") pod \"barbican-api-7b5d4b7b66-h8tzl\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.257080 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-config-data\") pod \"barbican-api-7b5d4b7b66-h8tzl\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.408517 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92ecd66f-e6fa-4be2-b61c-38fa89fb015f" path="/var/lib/kubelet/pods/92ecd66f-e6fa-4be2-b61c-38fa89fb015f/volumes" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.409787 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.561995 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6548f86b64-snz6f"] Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.760071 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a8f8fbad-ac9b-4103-8370-9693c234d655","Type":"ContainerStarted","Data":"c07a004f8ebf4a18c4b2cd5c2c652e998b5169cb8dd7a1d921bc1b8a41a069e7"} Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.780373 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"72889b73-e773-4559-800e-a87032e35a05","Type":"ContainerStarted","Data":"1f2f1769d31823f996453883380bcdc3a63510f9032ad956230c5077e26381df"} Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.780928 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-548d5df8d4-8fcdl"] Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.783972 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6548f86b64-snz6f" event={"ID":"846c1791-e576-402b-b8f1-2222c7dd6c4b","Type":"ContainerStarted","Data":"7748edace33464d03b137f5d95b801e4f5c4156623af202d3ff6814cb79457da"} Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.819806 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-z67lh"] Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.846784 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=9.84676943 podStartE2EDuration="9.84676943s" podCreationTimestamp="2025-12-10 11:07:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:38.785040985 +0000 UTC m=+1339.105251755" watchObservedRunningTime="2025-12-10 11:07:38.84676943 +0000 UTC m=+1339.166980180" Dec 10 11:07:38 crc kubenswrapper[4682]: I1210 11:07:38.912519 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=9.912493259 podStartE2EDuration="9.912493259s" podCreationTimestamp="2025-12-10 11:07:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:38.815333674 +0000 UTC m=+1339.135544424" watchObservedRunningTime="2025-12-10 11:07:38.912493259 +0000 UTC m=+1339.232704009" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.008143 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6b5dc89858-bj5qb"] Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.027593 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-84898968bc-8j5tc"] Dec 10 11:07:39 crc kubenswrapper[4682]: W1210 11:07:39.099934 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8b0a3dc4_5e16_4425_b932_e58a3cd2295a.slice/crio-a3dbf7817470e4807254470e763051af03c383179be95c855be6e64f5b5b3547 WatchSource:0}: Error finding container a3dbf7817470e4807254470e763051af03c383179be95c855be6e64f5b5b3547: Status 404 returned error can't find the container with id a3dbf7817470e4807254470e763051af03c383179be95c855be6e64f5b5b3547 Dec 10 11:07:39 crc 
kubenswrapper[4682]: I1210 11:07:39.221635 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7b5d4b7b66-h8tzl"] Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.445172 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-8h57v" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.574272 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9gcl\" (UniqueName: \"kubernetes.io/projected/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-kube-api-access-f9gcl\") pod \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.574334 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-config-data\") pod \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.574379 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-db-sync-config-data\") pod \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.574554 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-etc-machine-id\") pod \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.574600 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-combined-ca-bundle\") pod \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.574632 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-scripts\") pod \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\" (UID: \"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb\") " Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.577123 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "382d9ec8-5a3b-47b3-a301-955c7e2a4ecb" (UID: "382d9ec8-5a3b-47b3-a301-955c7e2a4ecb"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.579913 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-kube-api-access-f9gcl" (OuterVolumeSpecName: "kube-api-access-f9gcl") pod "382d9ec8-5a3b-47b3-a301-955c7e2a4ecb" (UID: "382d9ec8-5a3b-47b3-a301-955c7e2a4ecb"). InnerVolumeSpecName "kube-api-access-f9gcl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.582886 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "382d9ec8-5a3b-47b3-a301-955c7e2a4ecb" (UID: "382d9ec8-5a3b-47b3-a301-955c7e2a4ecb"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.584988 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-scripts" (OuterVolumeSpecName: "scripts") pod "382d9ec8-5a3b-47b3-a301-955c7e2a4ecb" (UID: "382d9ec8-5a3b-47b3-a301-955c7e2a4ecb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.626782 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "382d9ec8-5a3b-47b3-a301-955c7e2a4ecb" (UID: "382d9ec8-5a3b-47b3-a301-955c7e2a4ecb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.677704 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9gcl\" (UniqueName: \"kubernetes.io/projected/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-kube-api-access-f9gcl\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.677732 4682 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.677744 4682 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.677756 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.677767 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.685185 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-config-data" (OuterVolumeSpecName: "config-data") pod "382d9ec8-5a3b-47b3-a301-955c7e2a4ecb" (UID: "382d9ec8-5a3b-47b3-a301-955c7e2a4ecb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.797595 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.819982 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-8h57v" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.822199 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8h57v" event={"ID":"382d9ec8-5a3b-47b3-a301-955c7e2a4ecb","Type":"ContainerDied","Data":"21d9b2c38ae39e2151956d2c529711a1b843efaa9b2618c9ac16b57f3627466b"} Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.822230 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21d9b2c38ae39e2151956d2c529711a1b843efaa9b2618c9ac16b57f3627466b" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.824300 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" event={"ID":"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6","Type":"ContainerStarted","Data":"fc347e3b3bc31dcf54cec7aca8d90c8d11ce0b8b41b9e39234e5678f80df584f"} Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.824349 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" event={"ID":"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6","Type":"ContainerStarted","Data":"77cab110191949cca7f1063e7ad4247b2c07f3aaedb22d74ee2c3cdc0d3cd654"} Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.828163 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-84898968bc-8j5tc" event={"ID":"8b0a3dc4-5e16-4425-b932-e58a3cd2295a","Type":"ContainerStarted","Data":"a3dbf7817470e4807254470e763051af03c383179be95c855be6e64f5b5b3547"} Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.834811 4682 generic.go:334] "Generic (PLEG): container finished" podID="5abd16ef-83c7-4056-814c-c6937613ccce" containerID="01c6c916c8fadf7544df60c05f656772683cab4e119c8feb2b2152095d7dfeef" exitCode=0 Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.834867 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-z67lh" event={"ID":"5abd16ef-83c7-4056-814c-c6937613ccce","Type":"ContainerDied","Data":"01c6c916c8fadf7544df60c05f656772683cab4e119c8feb2b2152095d7dfeef"} Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.834891 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-z67lh" event={"ID":"5abd16ef-83c7-4056-814c-c6937613ccce","Type":"ContainerStarted","Data":"24683708fe467b73715ea5784f76db9d9a3c1b958170f8465c1bec3c25100b6e"} Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.837223 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" event={"ID":"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d","Type":"ContainerStarted","Data":"e4cebb83e080be1aa66b75098a70de1353d3d8e1d4f43043d56472ad27ccd677"} Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.842100 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-548d5df8d4-8fcdl" event={"ID":"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68","Type":"ContainerStarted","Data":"9ef9b48f7d6c3eac7d8341e019f95d8c9969fd0059fb0c4b9aebb16c9a6d501c"} Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.842164 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-548d5df8d4-8fcdl" event={"ID":"66f5310d-7fea-4c01-8fe5-fe6ec16b3c68","Type":"ContainerStarted","Data":"e320b977e6ddf7d4399b3578e702e00e2effe07c129926a4788b7884eebca3e3"} Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.843158 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 
11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.851755 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6548f86b64-snz6f" event={"ID":"846c1791-e576-402b-b8f1-2222c7dd6c4b","Type":"ContainerStarted","Data":"5adb199f5d4d9cf4d7878ca7a63bebeb9ba793f12868a4f3e6d79c0213ca104c"} Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.996746 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 10 11:07:39 crc kubenswrapper[4682]: I1210 11:07:39.996793 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.044037 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.044089 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.051983 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-548d5df8d4-8fcdl" podStartSLOduration=3.051964492 podStartE2EDuration="3.051964492s" podCreationTimestamp="2025-12-10 11:07:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:39.910790398 +0000 UTC m=+1340.231001158" watchObservedRunningTime="2025-12-10 11:07:40.051964492 +0000 UTC m=+1340.372175242" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.128982 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.156774 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.178888 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:07:40 crc kubenswrapper[4682]: E1210 11:07:40.179259 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="382d9ec8-5a3b-47b3-a301-955c7e2a4ecb" containerName="cinder-db-sync" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.179270 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="382d9ec8-5a3b-47b3-a301-955c7e2a4ecb" containerName="cinder-db-sync" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.179454 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="382d9ec8-5a3b-47b3-a301-955c7e2a4ecb" containerName="cinder-db-sync" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.180680 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.202138 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.202416 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.202630 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.202812 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.202918 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-pc8qf" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.220833 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.220877 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-config-data\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.220918 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.220943 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-scripts\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.220958 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gg94t\" (UniqueName: \"kubernetes.io/projected/1067eae2-6d09-419f-8a0d-6f79de303aea-kube-api-access-gg94t\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.221015 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1067eae2-6d09-419f-8a0d-6f79de303aea-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.237898 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.290570 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/cinder-scheduler-0"] Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.324913 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1067eae2-6d09-419f-8a0d-6f79de303aea-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.325019 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.325050 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-config-data\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.325093 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.325120 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-scripts\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.325141 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gg94t\" (UniqueName: \"kubernetes.io/projected/1067eae2-6d09-419f-8a0d-6f79de303aea-kube-api-access-gg94t\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.325531 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1067eae2-6d09-419f-8a0d-6f79de303aea-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.337241 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.399351 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gg94t\" (UniqueName: \"kubernetes.io/projected/1067eae2-6d09-419f-8a0d-6f79de303aea-kube-api-access-gg94t\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.399807 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.400307 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-scripts\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.401041 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-config-data\") pod \"cinder-scheduler-0\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.752169 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.852606 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-z67lh"] Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.912342 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-wkcwn"] Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.918612 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.959730 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-config\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.959812 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.959963 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.960047 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.960078 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbhdd\" (UniqueName: \"kubernetes.io/projected/9a8833f9-3cd6-4100-acae-847c61f5a6ed-kube-api-access-gbhdd\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " 
pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.960108 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.995220 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6548f86b64-snz6f" event={"ID":"846c1791-e576-402b-b8f1-2222c7dd6c4b","Type":"ContainerStarted","Data":"248262389f2d1afa7f0910bd32e4c3b1daa504b4827c152eee2ad621f5d8e260"} Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.996018 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:40 crc kubenswrapper[4682]: I1210 11:07:40.996047 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.002978 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-wkcwn"] Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.036810 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.036995 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.037179 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" event={"ID":"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6","Type":"ContainerStarted","Data":"40094886e3e24b9d038d32b1037b0dc3d88fc91eaa27596d28421f3012cfea6e"} Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.037208 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.037220 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.037245 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.041927 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.061403 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.061690 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.061776 4682 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-gbhdd\" (UniqueName: \"kubernetes.io/projected/9a8833f9-3cd6-4100-acae-847c61f5a6ed-kube-api-access-gbhdd\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.061856 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.061943 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-config\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.062036 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.062927 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.063608 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.063726 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.064284 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.064394 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-config\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.085932 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 
11:07:41.087822 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.103891 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.109275 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.146210 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbhdd\" (UniqueName: \"kubernetes.io/projected/9a8833f9-3cd6-4100-acae-847c61f5a6ed-kube-api-access-gbhdd\") pod \"dnsmasq-dns-6bb4fc677f-wkcwn\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.269443 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-scripts\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.269663 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.269758 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6n8k6\" (UniqueName: \"kubernetes.io/projected/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-kube-api-access-6n8k6\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.269799 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-logs\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.269849 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-config-data\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.269905 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.269952 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-config-data-custom\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.285378 4682 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openstack/placement-6548f86b64-snz6f" podStartSLOduration=4.285351127 podStartE2EDuration="4.285351127s" podCreationTimestamp="2025-12-10 11:07:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:41.205758234 +0000 UTC m=+1341.525969004" watchObservedRunningTime="2025-12-10 11:07:41.285351127 +0000 UTC m=+1341.605561887" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.350229 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.354713 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" podStartSLOduration=4.354691029 podStartE2EDuration="4.354691029s" podCreationTimestamp="2025-12-10 11:07:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:41.298606822 +0000 UTC m=+1341.618817582" watchObservedRunningTime="2025-12-10 11:07:41.354691029 +0000 UTC m=+1341.674901799" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.374733 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6n8k6\" (UniqueName: \"kubernetes.io/projected/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-kube-api-access-6n8k6\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.374796 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-logs\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.374830 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-config-data\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.374869 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.374900 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-config-data-custom\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.374919 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-scripts\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.375013 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.375095 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.375723 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-logs\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.419930 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.429637 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-config-data\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.430029 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6n8k6\" (UniqueName: \"kubernetes.io/projected/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-kube-api-access-6n8k6\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.433366 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-config-data-custom\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.434421 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-scripts\") pod \"cinder-api-0\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.732894 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 10 11:07:41 crc kubenswrapper[4682]: I1210 11:07:41.746087 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:07:41 crc kubenswrapper[4682]: W1210 11:07:41.841892 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1067eae2_6d09_419f_8a0d_6f79de303aea.slice/crio-8b6b9781bc137d853db0bbb5d91ecb82b3ad623d2b73df9fcc2577759a51da21 WatchSource:0}: Error finding container 8b6b9781bc137d853db0bbb5d91ecb82b3ad623d2b73df9fcc2577759a51da21: Status 404 returned error can't find the container with id 8b6b9781bc137d853db0bbb5d91ecb82b3ad623d2b73df9fcc2577759a51da21 Dec 10 11:07:42 crc kubenswrapper[4682]: I1210 11:07:42.090262 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-z67lh" event={"ID":"5abd16ef-83c7-4056-814c-c6937613ccce","Type":"ContainerStarted","Data":"5372924aaca2b14f3b73b5b5084ec5396fc73d31daa2f696b56cc0f324d1dae7"} Dec 10 11:07:42 crc kubenswrapper[4682]: I1210 11:07:42.091076 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-688c87cc99-z67lh" podUID="5abd16ef-83c7-4056-814c-c6937613ccce" containerName="dnsmasq-dns" containerID="cri-o://5372924aaca2b14f3b73b5b5084ec5396fc73d31daa2f696b56cc0f324d1dae7" gracePeriod=10 Dec 10 11:07:42 crc kubenswrapper[4682]: I1210 11:07:42.091453 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:42 crc kubenswrapper[4682]: I1210 11:07:42.104013 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1067eae2-6d09-419f-8a0d-6f79de303aea","Type":"ContainerStarted","Data":"8b6b9781bc137d853db0bbb5d91ecb82b3ad623d2b73df9fcc2577759a51da21"} Dec 10 11:07:42 crc kubenswrapper[4682]: I1210 11:07:42.129987 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-688c87cc99-z67lh" podStartSLOduration=5.129967651 podStartE2EDuration="5.129967651s" podCreationTimestamp="2025-12-10 11:07:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:42.120927188 +0000 UTC m=+1342.441137948" watchObservedRunningTime="2025-12-10 11:07:42.129967651 +0000 UTC m=+1342.450178401" Dec 10 11:07:42 crc kubenswrapper[4682]: I1210 11:07:42.503006 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-wkcwn"] Dec 10 11:07:42 crc kubenswrapper[4682]: I1210 11:07:42.850786 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.131007 4682 generic.go:334] "Generic (PLEG): container finished" podID="5abd16ef-83c7-4056-814c-c6937613ccce" containerID="5372924aaca2b14f3b73b5b5084ec5396fc73d31daa2f696b56cc0f324d1dae7" exitCode=0 Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.131063 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-z67lh" event={"ID":"5abd16ef-83c7-4056-814c-c6937613ccce","Type":"ContainerDied","Data":"5372924aaca2b14f3b73b5b5084ec5396fc73d31daa2f696b56cc0f324d1dae7"} Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.131339 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-z67lh" 
event={"ID":"5abd16ef-83c7-4056-814c-c6937613ccce","Type":"ContainerDied","Data":"24683708fe467b73715ea5784f76db9d9a3c1b958170f8465c1bec3c25100b6e"} Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.131366 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24683708fe467b73715ea5784f76db9d9a3c1b958170f8465c1bec3c25100b6e" Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.136043 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ed05abe1-c54a-4c41-9478-d5a9a0ea076c","Type":"ContainerStarted","Data":"dd87a27dfb8f1e0ffa68c15c89167130502f92d605a6daf74fc6ae2acf988608"} Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.138749 4682 generic.go:334] "Generic (PLEG): container finished" podID="9a8833f9-3cd6-4100-acae-847c61f5a6ed" containerID="0549a918de683bb946855b5d65707f94babae070564d4cfe2cc379e204f0e58a" exitCode=0 Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.138930 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" event={"ID":"9a8833f9-3cd6-4100-acae-847c61f5a6ed","Type":"ContainerDied","Data":"0549a918de683bb946855b5d65707f94babae070564d4cfe2cc379e204f0e58a"} Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.138983 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" event={"ID":"9a8833f9-3cd6-4100-acae-847c61f5a6ed","Type":"ContainerStarted","Data":"f937827ef444ffb4ca27d07fa2a28206f5f51e150f1bc8db5b2f43fbdcd7740c"} Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.139123 4682 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.275540 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.367966 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-dns-swift-storage-0\") pod \"5abd16ef-83c7-4056-814c-c6937613ccce\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.368210 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p64w4\" (UniqueName: \"kubernetes.io/projected/5abd16ef-83c7-4056-814c-c6937613ccce-kube-api-access-p64w4\") pod \"5abd16ef-83c7-4056-814c-c6937613ccce\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.368269 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-ovsdbserver-nb\") pod \"5abd16ef-83c7-4056-814c-c6937613ccce\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.368322 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-config\") pod \"5abd16ef-83c7-4056-814c-c6937613ccce\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.368364 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-dns-svc\") pod \"5abd16ef-83c7-4056-814c-c6937613ccce\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.368391 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-ovsdbserver-sb\") pod \"5abd16ef-83c7-4056-814c-c6937613ccce\" (UID: \"5abd16ef-83c7-4056-814c-c6937613ccce\") " Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.422713 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5abd16ef-83c7-4056-814c-c6937613ccce-kube-api-access-p64w4" (OuterVolumeSpecName: "kube-api-access-p64w4") pod "5abd16ef-83c7-4056-814c-c6937613ccce" (UID: "5abd16ef-83c7-4056-814c-c6937613ccce"). InnerVolumeSpecName "kube-api-access-p64w4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.481064 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p64w4\" (UniqueName: \"kubernetes.io/projected/5abd16ef-83c7-4056-814c-c6937613ccce-kube-api-access-p64w4\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.686302 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "5abd16ef-83c7-4056-814c-c6937613ccce" (UID: "5abd16ef-83c7-4056-814c-c6937613ccce"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.698239 4682 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.710160 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5abd16ef-83c7-4056-814c-c6937613ccce" (UID: "5abd16ef-83c7-4056-814c-c6937613ccce"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.714219 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-config" (OuterVolumeSpecName: "config") pod "5abd16ef-83c7-4056-814c-c6937613ccce" (UID: "5abd16ef-83c7-4056-814c-c6937613ccce"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.738980 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5abd16ef-83c7-4056-814c-c6937613ccce" (UID: "5abd16ef-83c7-4056-814c-c6937613ccce"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.739565 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5abd16ef-83c7-4056-814c-c6937613ccce" (UID: "5abd16ef-83c7-4056-814c-c6937613ccce"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.800084 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.800125 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.800140 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:43 crc kubenswrapper[4682]: I1210 11:07:43.800152 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5abd16ef-83c7-4056-814c-c6937613ccce-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:44 crc kubenswrapper[4682]: I1210 11:07:44.160595 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-z67lh" Dec 10 11:07:44 crc kubenswrapper[4682]: I1210 11:07:44.205340 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-z67lh"] Dec 10 11:07:44 crc kubenswrapper[4682]: I1210 11:07:44.220840 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-z67lh"] Dec 10 11:07:44 crc kubenswrapper[4682]: I1210 11:07:44.284984 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:44 crc kubenswrapper[4682]: I1210 11:07:44.412500 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5abd16ef-83c7-4056-814c-c6937613ccce" path="/var/lib/kubelet/pods/5abd16ef-83c7-4056-814c-c6937613ccce/volumes" Dec 10 11:07:44 crc kubenswrapper[4682]: I1210 11:07:44.835933 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.074876 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.102622 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.180940 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ed05abe1-c54a-4c41-9478-d5a9a0ea076c","Type":"ContainerStarted","Data":"2cf466dbef88d4c168c35762a827fba58f52d7027ca7b6417bfdeb873bffb847"} Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.185759 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1067eae2-6d09-419f-8a0d-6f79de303aea","Type":"ContainerStarted","Data":"63b289b1a5d53d5c723db14e06dc94641c0f7a3836a69fb28c73866c4915a3e8"} Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.447999 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-f48d9bb94-4d9lk"] Dec 10 11:07:45 crc kubenswrapper[4682]: E1210 11:07:45.448545 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5abd16ef-83c7-4056-814c-c6937613ccce" containerName="dnsmasq-dns" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.448569 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="5abd16ef-83c7-4056-814c-c6937613ccce" containerName="dnsmasq-dns" Dec 10 11:07:45 crc kubenswrapper[4682]: E1210 11:07:45.448606 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5abd16ef-83c7-4056-814c-c6937613ccce" containerName="init" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.448614 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="5abd16ef-83c7-4056-814c-c6937613ccce" containerName="init" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.448870 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="5abd16ef-83c7-4056-814c-c6937613ccce" containerName="dnsmasq-dns" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.450847 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.459540 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-f48d9bb94-4d9lk"] Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.471853 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.472049 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.573249 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pd9mz\" (UniqueName: \"kubernetes.io/projected/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-kube-api-access-pd9mz\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.573356 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-config-data\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.573431 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-public-tls-certs\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.573457 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-internal-tls-certs\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.573570 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-combined-ca-bundle\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.573602 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-config-data-custom\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.573852 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-logs\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.675374 4682 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-config-data-custom\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.676188 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-logs\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.676251 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pd9mz\" (UniqueName: \"kubernetes.io/projected/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-kube-api-access-pd9mz\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.676300 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-config-data\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.676332 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-public-tls-certs\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.676355 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-internal-tls-certs\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.676384 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-combined-ca-bundle\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.676711 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-logs\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.687324 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-config-data\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.687978 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-config-data-custom\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.688517 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-combined-ca-bundle\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.689047 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-internal-tls-certs\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.701061 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-public-tls-certs\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.712101 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pd9mz\" (UniqueName: \"kubernetes.io/projected/2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f-kube-api-access-pd9mz\") pod \"barbican-api-f48d9bb94-4d9lk\" (UID: \"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f\") " pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:45 crc kubenswrapper[4682]: I1210 11:07:45.804035 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:46 crc kubenswrapper[4682]: I1210 11:07:46.418248 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7f7fc58469-rvhd4" Dec 10 11:07:46 crc kubenswrapper[4682]: I1210 11:07:46.506568 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-74cb8f8cb4-8fg27"] Dec 10 11:07:46 crc kubenswrapper[4682]: I1210 11:07:46.506785 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-74cb8f8cb4-8fg27" podUID="cae3bbd4-4d3b-4cce-969d-6a742664664e" containerName="neutron-api" containerID="cri-o://faa278a4b836a3b718a93da59d3922dc7ee5f150d84552d6723475d5a46656b6" gracePeriod=30 Dec 10 11:07:46 crc kubenswrapper[4682]: I1210 11:07:46.507179 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-74cb8f8cb4-8fg27" podUID="cae3bbd4-4d3b-4cce-969d-6a742664664e" containerName="neutron-httpd" containerID="cri-o://43a5b4fc146dbd44eb112f3dde542dbafb8cebe63e489137121408672242f4a2" gracePeriod=30 Dec 10 11:07:46 crc kubenswrapper[4682]: I1210 11:07:46.556027 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-f48d9bb94-4d9lk"] Dec 10 11:07:46 crc kubenswrapper[4682]: I1210 11:07:46.884036 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.261720 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-84898968bc-8j5tc" event={"ID":"8b0a3dc4-5e16-4425-b932-e58a3cd2295a","Type":"ContainerStarted","Data":"df28a1dc69619ce8242c9feb037fb15ce4f7078cf5a504d2af386f6e2c7bab79"} Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.261978 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-84898968bc-8j5tc" event={"ID":"8b0a3dc4-5e16-4425-b932-e58a3cd2295a","Type":"ContainerStarted","Data":"73269c5945c2363d3ab1a83219171c058ab1c7d5c608325c8f75da8780a4edba"} Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.268907 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" event={"ID":"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d","Type":"ContainerStarted","Data":"5f6381b6376ef4f8dc4b975711457883e81a76d3d106d0368aed452effe219f2"} Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.268960 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" event={"ID":"4f04d5f7-0e27-4de0-83c3-10a07dcbc97d","Type":"ContainerStarted","Data":"45c29130d572e6b57110a874a4d129a737783af1825b1126e0f0276d5543f208"} Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.271928 4682 generic.go:334] "Generic (PLEG): container finished" podID="cae3bbd4-4d3b-4cce-969d-6a742664664e" containerID="43a5b4fc146dbd44eb112f3dde542dbafb8cebe63e489137121408672242f4a2" exitCode=0 Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.272011 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74cb8f8cb4-8fg27" event={"ID":"cae3bbd4-4d3b-4cce-969d-6a742664664e","Type":"ContainerDied","Data":"43a5b4fc146dbd44eb112f3dde542dbafb8cebe63e489137121408672242f4a2"} Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.282172 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f48d9bb94-4d9lk" 
event={"ID":"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f","Type":"ContainerStarted","Data":"13db18e22e66f121e1470960920f32b56d9a0ad49a92683f0378a378edc95fa6"} Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.282230 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f48d9bb94-4d9lk" event={"ID":"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f","Type":"ContainerStarted","Data":"c7290bb5717204a024ebab97fde7455baf2f3eaa667d0226e0c3e9a5bcb86664"} Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.297116 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="ed05abe1-c54a-4c41-9478-d5a9a0ea076c" containerName="cinder-api-log" containerID="cri-o://2cf466dbef88d4c168c35762a827fba58f52d7027ca7b6417bfdeb873bffb847" gracePeriod=30 Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.297233 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.297273 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="ed05abe1-c54a-4c41-9478-d5a9a0ea076c" containerName="cinder-api" containerID="cri-o://55ae6882e6c0b575f25c706b700d328c85797cbc238369a0b1f7c0a03a863726" gracePeriod=30 Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.303905 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" event={"ID":"9a8833f9-3cd6-4100-acae-847c61f5a6ed","Type":"ContainerStarted","Data":"9dec16ffe5fd4cfc4d50a403ed53d43d632bbb69203b092f2eacdc07ea15e50c"} Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.305050 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.312558 4682 generic.go:334] "Generic (PLEG): container finished" podID="997c9b87-b796-40a3-a9c9-cf1e2a3abc4d" containerID="cb98015b0a07b29eeb84dcd3e32e36e10eb4cbba2c2f5efe9c63e6094c517ac9" exitCode=0 Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.312633 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-sync-9q89f" event={"ID":"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d","Type":"ContainerDied","Data":"cb98015b0a07b29eeb84dcd3e32e36e10eb4cbba2c2f5efe9c63e6094c517ac9"} Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.313783 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-84898968bc-8j5tc" podStartSLOduration=3.638376405 podStartE2EDuration="10.313755993s" podCreationTimestamp="2025-12-10 11:07:37 +0000 UTC" firstStartedPulling="2025-12-10 11:07:39.129398235 +0000 UTC m=+1339.449608985" lastFinishedPulling="2025-12-10 11:07:45.804777823 +0000 UTC m=+1346.124988573" observedRunningTime="2025-12-10 11:07:47.278267851 +0000 UTC m=+1347.598478621" watchObservedRunningTime="2025-12-10 11:07:47.313755993 +0000 UTC m=+1347.633966743" Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.326980 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-6b5dc89858-bj5qb" podStartSLOduration=3.476187434 podStartE2EDuration="10.326960177s" podCreationTimestamp="2025-12-10 11:07:37 +0000 UTC" firstStartedPulling="2025-12-10 11:07:39.034852083 +0000 UTC m=+1339.355062833" lastFinishedPulling="2025-12-10 11:07:45.885624826 +0000 UTC m=+1346.205835576" observedRunningTime="2025-12-10 11:07:47.3063263 +0000 UTC 
m=+1347.626537070" watchObservedRunningTime="2025-12-10 11:07:47.326960177 +0000 UTC m=+1347.647170927" Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.328455 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1067eae2-6d09-419f-8a0d-6f79de303aea","Type":"ContainerStarted","Data":"f45a1cca0160ae153dd786f349f66150a19fa61126e3f6269a55cef3a8dcff8f"} Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.365228 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=7.365209075 podStartE2EDuration="7.365209075s" podCreationTimestamp="2025-12-10 11:07:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:47.364999868 +0000 UTC m=+1347.685210638" watchObservedRunningTime="2025-12-10 11:07:47.365209075 +0000 UTC m=+1347.685419825" Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.411048 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" podStartSLOduration=7.411022501 podStartE2EDuration="7.411022501s" podCreationTimestamp="2025-12-10 11:07:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:47.390527298 +0000 UTC m=+1347.710738068" watchObservedRunningTime="2025-12-10 11:07:47.411022501 +0000 UTC m=+1347.731233251" Dec 10 11:07:47 crc kubenswrapper[4682]: I1210 11:07:47.423856 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=6.191663935 podStartE2EDuration="7.423830152s" podCreationTimestamp="2025-12-10 11:07:40 +0000 UTC" firstStartedPulling="2025-12-10 11:07:41.860120356 +0000 UTC m=+1342.180331106" lastFinishedPulling="2025-12-10 11:07:43.092286583 +0000 UTC m=+1343.412497323" observedRunningTime="2025-12-10 11:07:47.419181426 +0000 UTC m=+1347.739392186" watchObservedRunningTime="2025-12-10 11:07:47.423830152 +0000 UTC m=+1347.744040902" Dec 10 11:07:48 crc kubenswrapper[4682]: I1210 11:07:48.339622 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f48d9bb94-4d9lk" event={"ID":"2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f","Type":"ContainerStarted","Data":"ec38ad21ad7a1a0dca32ade0fa3d0b07f6e9084d33104f406e7cb24e056facfd"} Dec 10 11:07:48 crc kubenswrapper[4682]: I1210 11:07:48.339992 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:48 crc kubenswrapper[4682]: I1210 11:07:48.341679 4682 generic.go:334] "Generic (PLEG): container finished" podID="ed05abe1-c54a-4c41-9478-d5a9a0ea076c" containerID="2cf466dbef88d4c168c35762a827fba58f52d7027ca7b6417bfdeb873bffb847" exitCode=143 Dec 10 11:07:48 crc kubenswrapper[4682]: I1210 11:07:48.341772 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ed05abe1-c54a-4c41-9478-d5a9a0ea076c","Type":"ContainerStarted","Data":"55ae6882e6c0b575f25c706b700d328c85797cbc238369a0b1f7c0a03a863726"} Dec 10 11:07:48 crc kubenswrapper[4682]: I1210 11:07:48.341811 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ed05abe1-c54a-4c41-9478-d5a9a0ea076c","Type":"ContainerDied","Data":"2cf466dbef88d4c168c35762a827fba58f52d7027ca7b6417bfdeb873bffb847"} Dec 10 11:07:48 crc kubenswrapper[4682]: I1210 
11:07:48.365372 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-f48d9bb94-4d9lk" podStartSLOduration=3.365351122 podStartE2EDuration="3.365351122s" podCreationTimestamp="2025-12-10 11:07:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:48.358009343 +0000 UTC m=+1348.678220103" watchObservedRunningTime="2025-12-10 11:07:48.365351122 +0000 UTC m=+1348.685561872" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.012069 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.183419 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-certs\") pod \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.183491 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-scripts\") pod \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.183719 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbvjs\" (UniqueName: \"kubernetes.io/projected/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-kube-api-access-xbvjs\") pod \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.183784 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-config-data\") pod \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.183814 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-combined-ca-bundle\") pod \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\" (UID: \"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d\") " Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.191111 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-scripts" (OuterVolumeSpecName: "scripts") pod "997c9b87-b796-40a3-a9c9-cf1e2a3abc4d" (UID: "997c9b87-b796-40a3-a9c9-cf1e2a3abc4d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.191758 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-certs" (OuterVolumeSpecName: "certs") pod "997c9b87-b796-40a3-a9c9-cf1e2a3abc4d" (UID: "997c9b87-b796-40a3-a9c9-cf1e2a3abc4d"). InnerVolumeSpecName "certs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.209098 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.209852 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-kube-api-access-xbvjs" (OuterVolumeSpecName: "kube-api-access-xbvjs") pod "997c9b87-b796-40a3-a9c9-cf1e2a3abc4d" (UID: "997c9b87-b796-40a3-a9c9-cf1e2a3abc4d"). InnerVolumeSpecName "kube-api-access-xbvjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.229381 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-config-data" (OuterVolumeSpecName: "config-data") pod "997c9b87-b796-40a3-a9c9-cf1e2a3abc4d" (UID: "997c9b87-b796-40a3-a9c9-cf1e2a3abc4d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.291619 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbvjs\" (UniqueName: \"kubernetes.io/projected/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-kube-api-access-xbvjs\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.291666 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.291679 4682 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/projected/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.291696 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.299860 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "997c9b87-b796-40a3-a9c9-cf1e2a3abc4d" (UID: "997c9b87-b796-40a3-a9c9-cf1e2a3abc4d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.397138 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.412075 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-db-sync-9q89f" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.412041 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-sync-9q89f" event={"ID":"997c9b87-b796-40a3-a9c9-cf1e2a3abc4d","Type":"ContainerDied","Data":"2a65ad3ed36e3c0a9cd005f63ae5d71b4fbba1900fc6615234bcbe9568f123c3"} Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.413349 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a65ad3ed36e3c0a9cd005f63ae5d71b4fbba1900fc6615234bcbe9568f123c3" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.413374 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.469269 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-storageinit-bfwj8"] Dec 10 11:07:49 crc kubenswrapper[4682]: E1210 11:07:49.469805 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="997c9b87-b796-40a3-a9c9-cf1e2a3abc4d" containerName="cloudkitty-db-sync" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.469821 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="997c9b87-b796-40a3-a9c9-cf1e2a3abc4d" containerName="cloudkitty-db-sync" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.469989 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="997c9b87-b796-40a3-a9c9-cf1e2a3abc4d" containerName="cloudkitty-db-sync" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.471112 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.477340 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cloudkitty-client-internal" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.477822 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.479856 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-scripts" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.482460 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-config-data" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.482608 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-cloudkitty-dockercfg-6svbf" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.519698 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-storageinit-bfwj8"] Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.605125 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4v89\" (UniqueName: \"kubernetes.io/projected/33627f7b-af0a-495f-b5cb-ed10c47ed17d-kube-api-access-l4v89\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.605262 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/33627f7b-af0a-495f-b5cb-ed10c47ed17d-certs\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 
11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.605294 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-scripts\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.605351 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-combined-ca-bundle\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.605390 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-config-data\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.715441 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4v89\" (UniqueName: \"kubernetes.io/projected/33627f7b-af0a-495f-b5cb-ed10c47ed17d-kube-api-access-l4v89\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.715676 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/33627f7b-af0a-495f-b5cb-ed10c47ed17d-certs\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.715706 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-scripts\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.715812 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-combined-ca-bundle\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.715880 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-config-data\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.722599 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-scripts\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.724180 4682 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-combined-ca-bundle\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.728453 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-config-data\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.734134 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/projected/33627f7b-af0a-495f-b5cb-ed10c47ed17d-certs\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.740158 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4v89\" (UniqueName: \"kubernetes.io/projected/33627f7b-af0a-495f-b5cb-ed10c47ed17d-kube-api-access-l4v89\") pod \"cloudkitty-storageinit-bfwj8\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:49 crc kubenswrapper[4682]: I1210 11:07:49.804397 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:07:50 crc kubenswrapper[4682]: I1210 11:07:50.462246 4682 generic.go:334] "Generic (PLEG): container finished" podID="cae3bbd4-4d3b-4cce-969d-6a742664664e" containerID="faa278a4b836a3b718a93da59d3922dc7ee5f150d84552d6723475d5a46656b6" exitCode=0 Dec 10 11:07:50 crc kubenswrapper[4682]: I1210 11:07:50.462603 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74cb8f8cb4-8fg27" event={"ID":"cae3bbd4-4d3b-4cce-969d-6a742664664e","Type":"ContainerDied","Data":"faa278a4b836a3b718a93da59d3922dc7ee5f150d84552d6723475d5a46656b6"} Dec 10 11:07:50 crc kubenswrapper[4682]: I1210 11:07:50.675528 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:50 crc kubenswrapper[4682]: I1210 11:07:50.753801 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:07:50 crc kubenswrapper[4682]: I1210 11:07:50.754167 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 10 11:07:51 crc kubenswrapper[4682]: I1210 11:07:51.084884 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 10 11:07:51 crc kubenswrapper[4682]: I1210 11:07:51.352539 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:07:51 crc kubenswrapper[4682]: I1210 11:07:51.425673 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-thrpw"] Dec 10 11:07:51 crc kubenswrapper[4682]: I1210 11:07:51.426605 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" podUID="9483a109-197b-41a8-94ee-498bee3a67eb" containerName="dnsmasq-dns" 
containerID="cri-o://1b597744ef7ccfe50dac0a258b9776fc9871100f64e0c5ed0d9ad8f27681f900" gracePeriod=10 Dec 10 11:07:51 crc kubenswrapper[4682]: I1210 11:07:51.618110 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:07:52 crc kubenswrapper[4682]: I1210 11:07:52.503316 4682 generic.go:334] "Generic (PLEG): container finished" podID="9483a109-197b-41a8-94ee-498bee3a67eb" containerID="1b597744ef7ccfe50dac0a258b9776fc9871100f64e0c5ed0d9ad8f27681f900" exitCode=0 Dec 10 11:07:52 crc kubenswrapper[4682]: I1210 11:07:52.503606 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="1067eae2-6d09-419f-8a0d-6f79de303aea" containerName="cinder-scheduler" containerID="cri-o://63b289b1a5d53d5c723db14e06dc94641c0f7a3836a69fb28c73866c4915a3e8" gracePeriod=30 Dec 10 11:07:52 crc kubenswrapper[4682]: I1210 11:07:52.504077 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" event={"ID":"9483a109-197b-41a8-94ee-498bee3a67eb","Type":"ContainerDied","Data":"1b597744ef7ccfe50dac0a258b9776fc9871100f64e0c5ed0d9ad8f27681f900"} Dec 10 11:07:52 crc kubenswrapper[4682]: I1210 11:07:52.504582 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="1067eae2-6d09-419f-8a0d-6f79de303aea" containerName="probe" containerID="cri-o://f45a1cca0160ae153dd786f349f66150a19fa61126e3f6269a55cef3a8dcff8f" gracePeriod=30 Dec 10 11:07:53 crc kubenswrapper[4682]: I1210 11:07:53.520758 4682 generic.go:334] "Generic (PLEG): container finished" podID="1067eae2-6d09-419f-8a0d-6f79de303aea" containerID="f45a1cca0160ae153dd786f349f66150a19fa61126e3f6269a55cef3a8dcff8f" exitCode=0 Dec 10 11:07:53 crc kubenswrapper[4682]: I1210 11:07:53.521064 4682 generic.go:334] "Generic (PLEG): container finished" podID="1067eae2-6d09-419f-8a0d-6f79de303aea" containerID="63b289b1a5d53d5c723db14e06dc94641c0f7a3836a69fb28c73866c4915a3e8" exitCode=0 Dec 10 11:07:53 crc kubenswrapper[4682]: I1210 11:07:53.520815 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1067eae2-6d09-419f-8a0d-6f79de303aea","Type":"ContainerDied","Data":"f45a1cca0160ae153dd786f349f66150a19fa61126e3f6269a55cef3a8dcff8f"} Dec 10 11:07:53 crc kubenswrapper[4682]: I1210 11:07:53.521101 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1067eae2-6d09-419f-8a0d-6f79de303aea","Type":"ContainerDied","Data":"63b289b1a5d53d5c723db14e06dc94641c0f7a3836a69fb28c73866c4915a3e8"} Dec 10 11:07:54 crc kubenswrapper[4682]: I1210 11:07:54.168244 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" podUID="9483a109-197b-41a8-94ee-498bee3a67eb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.165:5353: connect: connection refused" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.266631 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.278441 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.298715 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402098 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2k7l\" (UniqueName: \"kubernetes.io/projected/cae3bbd4-4d3b-4cce-969d-6a742664664e-kube-api-access-r2k7l\") pod \"cae3bbd4-4d3b-4cce-969d-6a742664664e\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402152 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-config\") pod \"9483a109-197b-41a8-94ee-498bee3a67eb\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402246 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-scripts\") pod \"1067eae2-6d09-419f-8a0d-6f79de303aea\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402297 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-config-data-custom\") pod \"1067eae2-6d09-419f-8a0d-6f79de303aea\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402348 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-dns-svc\") pod \"9483a109-197b-41a8-94ee-498bee3a67eb\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402381 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-config\") pod \"cae3bbd4-4d3b-4cce-969d-6a742664664e\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402430 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-dns-swift-storage-0\") pod \"9483a109-197b-41a8-94ee-498bee3a67eb\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402456 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1067eae2-6d09-419f-8a0d-6f79de303aea-etc-machine-id\") pod \"1067eae2-6d09-419f-8a0d-6f79de303aea\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402496 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ms9kj\" (UniqueName: \"kubernetes.io/projected/9483a109-197b-41a8-94ee-498bee3a67eb-kube-api-access-ms9kj\") pod \"9483a109-197b-41a8-94ee-498bee3a67eb\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402528 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-combined-ca-bundle\") pod \"cae3bbd4-4d3b-4cce-969d-6a742664664e\" (UID: 
\"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402571 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-ovsdbserver-nb\") pod \"9483a109-197b-41a8-94ee-498bee3a67eb\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402606 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-config-data\") pod \"1067eae2-6d09-419f-8a0d-6f79de303aea\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402633 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-ovsdbserver-sb\") pod \"9483a109-197b-41a8-94ee-498bee3a67eb\" (UID: \"9483a109-197b-41a8-94ee-498bee3a67eb\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402655 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-combined-ca-bundle\") pod \"1067eae2-6d09-419f-8a0d-6f79de303aea\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402673 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-ovndb-tls-certs\") pod \"cae3bbd4-4d3b-4cce-969d-6a742664664e\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402708 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gg94t\" (UniqueName: \"kubernetes.io/projected/1067eae2-6d09-419f-8a0d-6f79de303aea-kube-api-access-gg94t\") pod \"1067eae2-6d09-419f-8a0d-6f79de303aea\" (UID: \"1067eae2-6d09-419f-8a0d-6f79de303aea\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402795 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-httpd-config\") pod \"cae3bbd4-4d3b-4cce-969d-6a742664664e\" (UID: \"cae3bbd4-4d3b-4cce-969d-6a742664664e\") " Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.402928 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1067eae2-6d09-419f-8a0d-6f79de303aea-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1067eae2-6d09-419f-8a0d-6f79de303aea" (UID: "1067eae2-6d09-419f-8a0d-6f79de303aea"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.403285 4682 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1067eae2-6d09-419f-8a0d-6f79de303aea-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.409149 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "cae3bbd4-4d3b-4cce-969d-6a742664664e" (UID: "cae3bbd4-4d3b-4cce-969d-6a742664664e"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.412676 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1067eae2-6d09-419f-8a0d-6f79de303aea" (UID: "1067eae2-6d09-419f-8a0d-6f79de303aea"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.412793 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cae3bbd4-4d3b-4cce-969d-6a742664664e-kube-api-access-r2k7l" (OuterVolumeSpecName: "kube-api-access-r2k7l") pod "cae3bbd4-4d3b-4cce-969d-6a742664664e" (UID: "cae3bbd4-4d3b-4cce-969d-6a742664664e"). InnerVolumeSpecName "kube-api-access-r2k7l". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.415739 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-scripts" (OuterVolumeSpecName: "scripts") pod "1067eae2-6d09-419f-8a0d-6f79de303aea" (UID: "1067eae2-6d09-419f-8a0d-6f79de303aea"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.418489 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1067eae2-6d09-419f-8a0d-6f79de303aea-kube-api-access-gg94t" (OuterVolumeSpecName: "kube-api-access-gg94t") pod "1067eae2-6d09-419f-8a0d-6f79de303aea" (UID: "1067eae2-6d09-419f-8a0d-6f79de303aea"). InnerVolumeSpecName "kube-api-access-gg94t". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.433155 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9483a109-197b-41a8-94ee-498bee3a67eb-kube-api-access-ms9kj" (OuterVolumeSpecName: "kube-api-access-ms9kj") pod "9483a109-197b-41a8-94ee-498bee3a67eb" (UID: "9483a109-197b-41a8-94ee-498bee3a67eb"). InnerVolumeSpecName "kube-api-access-ms9kj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.503617 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1067eae2-6d09-419f-8a0d-6f79de303aea" (UID: "1067eae2-6d09-419f-8a0d-6f79de303aea"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.505247 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.505280 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gg94t\" (UniqueName: \"kubernetes.io/projected/1067eae2-6d09-419f-8a0d-6f79de303aea-kube-api-access-gg94t\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.505296 4682 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-httpd-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.505308 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2k7l\" (UniqueName: \"kubernetes.io/projected/cae3bbd4-4d3b-4cce-969d-6a742664664e-kube-api-access-r2k7l\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.505321 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.505333 4682 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.505344 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ms9kj\" (UniqueName: \"kubernetes.io/projected/9483a109-197b-41a8-94ee-498bee3a67eb-kube-api-access-ms9kj\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.545193 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9483a109-197b-41a8-94ee-498bee3a67eb" (UID: "9483a109-197b-41a8-94ee-498bee3a67eb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.547792 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9483a109-197b-41a8-94ee-498bee3a67eb" (UID: "9483a109-197b-41a8-94ee-498bee3a67eb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.590297 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9483a109-197b-41a8-94ee-498bee3a67eb" (UID: "9483a109-197b-41a8-94ee-498bee3a67eb"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.591809 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9483a109-197b-41a8-94ee-498bee3a67eb" (UID: "9483a109-197b-41a8-94ee-498bee3a67eb"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.591935 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1067eae2-6d09-419f-8a0d-6f79de303aea","Type":"ContainerDied","Data":"8b6b9781bc137d853db0bbb5d91ecb82b3ad623d2b73df9fcc2577759a51da21"} Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.591975 4682 scope.go:117] "RemoveContainer" containerID="f45a1cca0160ae153dd786f349f66150a19fa61126e3f6269a55cef3a8dcff8f" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.591997 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.593450 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cae3bbd4-4d3b-4cce-969d-6a742664664e" (UID: "cae3bbd4-4d3b-4cce-969d-6a742664664e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.593705 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-config" (OuterVolumeSpecName: "config") pod "cae3bbd4-4d3b-4cce-969d-6a742664664e" (UID: "cae3bbd4-4d3b-4cce-969d-6a742664664e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.603094 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-config" (OuterVolumeSpecName: "config") pod "9483a109-197b-41a8-94ee-498bee3a67eb" (UID: "9483a109-197b-41a8-94ee-498bee3a67eb"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.607126 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.607153 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.607162 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.607171 4682 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.607179 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.607187 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.607194 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9483a109-197b-41a8-94ee-498bee3a67eb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.608661 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" event={"ID":"9483a109-197b-41a8-94ee-498bee3a67eb","Type":"ContainerDied","Data":"2faaa8fa3f6d8fba11b1292294db2a2b2d82dba1797cea49278aecd0464daca9"} Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.608749 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-thrpw" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.617167 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-74cb8f8cb4-8fg27" event={"ID":"cae3bbd4-4d3b-4cce-969d-6a742664664e","Type":"ContainerDied","Data":"3d93b711e61c35b34f104aea7e501e0de2d2abc514c7f76f57cbafefc317111b"} Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.617257 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-74cb8f8cb4-8fg27" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.625070 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "cae3bbd4-4d3b-4cce-969d-6a742664664e" (UID: "cae3bbd4-4d3b-4cce-969d-6a742664664e"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.660783 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-config-data" (OuterVolumeSpecName: "config-data") pod "1067eae2-6d09-419f-8a0d-6f79de303aea" (UID: "1067eae2-6d09-419f-8a0d-6f79de303aea"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.663546 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-thrpw"] Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.672775 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-thrpw"] Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.709380 4682 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cae3bbd4-4d3b-4cce-969d-6a742664664e-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.709419 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1067eae2-6d09-419f-8a0d-6f79de303aea-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.961261 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.983586 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.995591 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:07:55 crc kubenswrapper[4682]: E1210 11:07:55.996385 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cae3bbd4-4d3b-4cce-969d-6a742664664e" containerName="neutron-api" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.996416 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="cae3bbd4-4d3b-4cce-969d-6a742664664e" containerName="neutron-api" Dec 10 11:07:55 crc kubenswrapper[4682]: E1210 11:07:55.996434 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9483a109-197b-41a8-94ee-498bee3a67eb" containerName="init" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.996443 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="9483a109-197b-41a8-94ee-498bee3a67eb" containerName="init" Dec 10 11:07:55 crc kubenswrapper[4682]: E1210 11:07:55.996487 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9483a109-197b-41a8-94ee-498bee3a67eb" containerName="dnsmasq-dns" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.996497 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="9483a109-197b-41a8-94ee-498bee3a67eb" containerName="dnsmasq-dns" Dec 10 11:07:55 crc kubenswrapper[4682]: E1210 11:07:55.996507 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1067eae2-6d09-419f-8a0d-6f79de303aea" containerName="cinder-scheduler" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.996514 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="1067eae2-6d09-419f-8a0d-6f79de303aea" containerName="cinder-scheduler" Dec 10 11:07:55 crc kubenswrapper[4682]: E1210 11:07:55.996526 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cae3bbd4-4d3b-4cce-969d-6a742664664e" containerName="neutron-httpd" Dec 10 
11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.996535 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="cae3bbd4-4d3b-4cce-969d-6a742664664e" containerName="neutron-httpd" Dec 10 11:07:55 crc kubenswrapper[4682]: E1210 11:07:55.996544 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1067eae2-6d09-419f-8a0d-6f79de303aea" containerName="probe" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.996554 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="1067eae2-6d09-419f-8a0d-6f79de303aea" containerName="probe" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.996790 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="1067eae2-6d09-419f-8a0d-6f79de303aea" containerName="cinder-scheduler" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.996818 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="cae3bbd4-4d3b-4cce-969d-6a742664664e" containerName="neutron-httpd" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.996831 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="cae3bbd4-4d3b-4cce-969d-6a742664664e" containerName="neutron-api" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.996841 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="1067eae2-6d09-419f-8a0d-6f79de303aea" containerName="probe" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.996860 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="9483a109-197b-41a8-94ee-498bee3a67eb" containerName="dnsmasq-dns" Dec 10 11:07:55 crc kubenswrapper[4682]: I1210 11:07:55.998211 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.005068 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.013015 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-74cb8f8cb4-8fg27"] Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.034294 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-74cb8f8cb4-8fg27"] Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.044286 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.123439 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9860c609-4b4e-4bdd-a72c-6760a86226b7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.123582 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9860c609-4b4e-4bdd-a72c-6760a86226b7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.123634 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9860c609-4b4e-4bdd-a72c-6760a86226b7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 
10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.123679 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9860c609-4b4e-4bdd-a72c-6760a86226b7-scripts\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.123705 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9860c609-4b4e-4bdd-a72c-6760a86226b7-config-data\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.123782 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdvg6\" (UniqueName: \"kubernetes.io/projected/9860c609-4b4e-4bdd-a72c-6760a86226b7-kube-api-access-wdvg6\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.224921 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdvg6\" (UniqueName: \"kubernetes.io/projected/9860c609-4b4e-4bdd-a72c-6760a86226b7-kube-api-access-wdvg6\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.224970 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9860c609-4b4e-4bdd-a72c-6760a86226b7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.225018 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9860c609-4b4e-4bdd-a72c-6760a86226b7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.225045 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9860c609-4b4e-4bdd-a72c-6760a86226b7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.225086 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9860c609-4b4e-4bdd-a72c-6760a86226b7-scripts\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.225110 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9860c609-4b4e-4bdd-a72c-6760a86226b7-config-data\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.225923 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/9860c609-4b4e-4bdd-a72c-6760a86226b7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.228712 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9860c609-4b4e-4bdd-a72c-6760a86226b7-scripts\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.228720 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9860c609-4b4e-4bdd-a72c-6760a86226b7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.246107 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9860c609-4b4e-4bdd-a72c-6760a86226b7-config-data\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.249003 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9860c609-4b4e-4bdd-a72c-6760a86226b7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.250287 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdvg6\" (UniqueName: \"kubernetes.io/projected/9860c609-4b4e-4bdd-a72c-6760a86226b7-kube-api-access-wdvg6\") pod \"cinder-scheduler-0\" (UID: \"9860c609-4b4e-4bdd-a72c-6760a86226b7\") " pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.331413 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.370712 4682 scope.go:117] "RemoveContainer" containerID="63b289b1a5d53d5c723db14e06dc94641c0f7a3836a69fb28c73866c4915a3e8" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.407254 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1067eae2-6d09-419f-8a0d-6f79de303aea" path="/var/lib/kubelet/pods/1067eae2-6d09-419f-8a0d-6f79de303aea/volumes" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.408179 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9483a109-197b-41a8-94ee-498bee3a67eb" path="/var/lib/kubelet/pods/9483a109-197b-41a8-94ee-498bee3a67eb/volumes" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.408903 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cae3bbd4-4d3b-4cce-969d-6a742664664e" path="/var/lib/kubelet/pods/cae3bbd4-4d3b-4cce-969d-6a742664664e/volumes" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.855691 4682 scope.go:117] "RemoveContainer" containerID="1b597744ef7ccfe50dac0a258b9776fc9871100f64e0c5ed0d9ad8f27681f900" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.856640 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-storageinit-bfwj8"] Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.936356 4682 scope.go:117] "RemoveContainer" containerID="f5cb586da4d0b375318f417e0458181ec5a3b4df6956e469b1a119d8f097ec9c" Dec 10 11:07:56 crc kubenswrapper[4682]: I1210 11:07:56.992687 4682 scope.go:117] "RemoveContainer" containerID="43a5b4fc146dbd44eb112f3dde542dbafb8cebe63e489137121408672242f4a2" Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.054868 4682 scope.go:117] "RemoveContainer" containerID="faa278a4b836a3b718a93da59d3922dc7ee5f150d84552d6723475d5a46656b6" Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.366765 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.622299 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.670747 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9860c609-4b4e-4bdd-a72c-6760a86226b7","Type":"ContainerStarted","Data":"525ff0c9450284db8665439736c2d490486658883347d4c8ae4ef01b54e2ee7b"} Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.674659 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-storageinit-bfwj8" event={"ID":"33627f7b-af0a-495f-b5cb-ed10c47ed17d","Type":"ContainerStarted","Data":"e73667f467359e8c2f47d2b5b24aea120bcdc4334e2d094517c6b2c67b3d7f91"} Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.674710 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-storageinit-bfwj8" event={"ID":"33627f7b-af0a-495f-b5cb-ed10c47ed17d","Type":"ContainerStarted","Data":"25be977357baa13fcff0d451e75a8e858e05ed06cec4bd4384a9b207d1f9f473"} Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.680691 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"97c96a0f-0978-472b-b04a-6b1f0850b97c","Type":"ContainerStarted","Data":"71c739263f5eccd7f700003a2b9b310844ec1f7055392bf333e1e401d865c0e6"} Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.680832 4682 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/ceilometer-0" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="ceilometer-central-agent" containerID="cri-o://f8aab96b0f7d4ac6c1f8c39574398e911e23efeeaffa56928b0213e6d14e0cea" gracePeriod=30 Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.680902 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="proxy-httpd" containerID="cri-o://71c739263f5eccd7f700003a2b9b310844ec1f7055392bf333e1e401d865c0e6" gracePeriod=30 Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.680937 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="sg-core" containerID="cri-o://214b1a88f0e22226c669b12ca77adfb8e5040a5885d9ec8d6c54c4d7f8e575b3" gracePeriod=30 Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.680950 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.680969 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="ceilometer-notification-agent" containerID="cri-o://f7d29c4e4534990c2c1ebf08c35f64ff5483a7bfbbc4ae031f298eda3e8928c8" gracePeriod=30 Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.699127 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-f48d9bb94-4d9lk" Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.704676 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-storageinit-bfwj8" podStartSLOduration=8.704654448 podStartE2EDuration="8.704654448s" podCreationTimestamp="2025-12-10 11:07:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:07:57.695110699 +0000 UTC m=+1358.015321449" watchObservedRunningTime="2025-12-10 11:07:57.704654448 +0000 UTC m=+1358.024865208" Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.727363 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.96417444 podStartE2EDuration="1m13.727345759s" podCreationTimestamp="2025-12-10 11:06:44 +0000 UTC" firstStartedPulling="2025-12-10 11:06:46.178357048 +0000 UTC m=+1286.498567798" lastFinishedPulling="2025-12-10 11:07:56.941528367 +0000 UTC m=+1357.261739117" observedRunningTime="2025-12-10 11:07:57.716690485 +0000 UTC m=+1358.036901235" watchObservedRunningTime="2025-12-10 11:07:57.727345759 +0000 UTC m=+1358.047556509" Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.821651 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7b5d4b7b66-h8tzl"] Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.821890 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" podUID="49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" containerName="barbican-api-log" containerID="cri-o://fc347e3b3bc31dcf54cec7aca8d90c8d11ce0b8b41b9e39234e5678f80df584f" gracePeriod=30 Dec 10 11:07:57 crc kubenswrapper[4682]: I1210 11:07:57.822456 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" 
podUID="49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" containerName="barbican-api" containerID="cri-o://40094886e3e24b9d038d32b1037b0dc3d88fc91eaa27596d28421f3012cfea6e" gracePeriod=30 Dec 10 11:07:58 crc kubenswrapper[4682]: I1210 11:07:58.719227 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9860c609-4b4e-4bdd-a72c-6760a86226b7","Type":"ContainerStarted","Data":"1b37fa1cbf1b9fbfea6a648a66bbea84ca6e606f08c14edba2e1e1ba7b401d0d"} Dec 10 11:07:58 crc kubenswrapper[4682]: I1210 11:07:58.726397 4682 generic.go:334] "Generic (PLEG): container finished" podID="49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" containerID="fc347e3b3bc31dcf54cec7aca8d90c8d11ce0b8b41b9e39234e5678f80df584f" exitCode=143 Dec 10 11:07:58 crc kubenswrapper[4682]: I1210 11:07:58.726488 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" event={"ID":"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6","Type":"ContainerDied","Data":"fc347e3b3bc31dcf54cec7aca8d90c8d11ce0b8b41b9e39234e5678f80df584f"} Dec 10 11:07:58 crc kubenswrapper[4682]: I1210 11:07:58.730707 4682 generic.go:334] "Generic (PLEG): container finished" podID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerID="71c739263f5eccd7f700003a2b9b310844ec1f7055392bf333e1e401d865c0e6" exitCode=0 Dec 10 11:07:58 crc kubenswrapper[4682]: I1210 11:07:58.730739 4682 generic.go:334] "Generic (PLEG): container finished" podID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerID="214b1a88f0e22226c669b12ca77adfb8e5040a5885d9ec8d6c54c4d7f8e575b3" exitCode=2 Dec 10 11:07:58 crc kubenswrapper[4682]: I1210 11:07:58.730753 4682 generic.go:334] "Generic (PLEG): container finished" podID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerID="f8aab96b0f7d4ac6c1f8c39574398e911e23efeeaffa56928b0213e6d14e0cea" exitCode=0 Dec 10 11:07:58 crc kubenswrapper[4682]: I1210 11:07:58.730803 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"97c96a0f-0978-472b-b04a-6b1f0850b97c","Type":"ContainerDied","Data":"71c739263f5eccd7f700003a2b9b310844ec1f7055392bf333e1e401d865c0e6"} Dec 10 11:07:58 crc kubenswrapper[4682]: I1210 11:07:58.730862 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"97c96a0f-0978-472b-b04a-6b1f0850b97c","Type":"ContainerDied","Data":"214b1a88f0e22226c669b12ca77adfb8e5040a5885d9ec8d6c54c4d7f8e575b3"} Dec 10 11:07:58 crc kubenswrapper[4682]: I1210 11:07:58.730879 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"97c96a0f-0978-472b-b04a-6b1f0850b97c","Type":"ContainerDied","Data":"f8aab96b0f7d4ac6c1f8c39574398e911e23efeeaffa56928b0213e6d14e0cea"} Dec 10 11:07:59 crc kubenswrapper[4682]: I1210 11:07:59.456401 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Dec 10 11:08:00 crc kubenswrapper[4682]: I1210 11:08:00.750443 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9860c609-4b4e-4bdd-a72c-6760a86226b7","Type":"ContainerStarted","Data":"32730e2ad5cfc0cb3d289da21f06895920712b5bbd062d9e91ce69de889bc833"} Dec 10 11:08:01 crc kubenswrapper[4682]: I1210 11:08:01.331518 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 10 11:08:01 crc kubenswrapper[4682]: I1210 11:08:01.476395 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" 
podUID="49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.175:9311/healthcheck\": read tcp 10.217.0.2:32784->10.217.0.175:9311: read: connection reset by peer" Dec 10 11:08:01 crc kubenswrapper[4682]: I1210 11:08:01.476512 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" podUID="49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.175:9311/healthcheck\": read tcp 10.217.0.2:32772->10.217.0.175:9311: read: connection reset by peer" Dec 10 11:08:01 crc kubenswrapper[4682]: I1210 11:08:01.771846 4682 generic.go:334] "Generic (PLEG): container finished" podID="49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" containerID="40094886e3e24b9d038d32b1037b0dc3d88fc91eaa27596d28421f3012cfea6e" exitCode=0 Dec 10 11:08:01 crc kubenswrapper[4682]: I1210 11:08:01.772157 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" event={"ID":"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6","Type":"ContainerDied","Data":"40094886e3e24b9d038d32b1037b0dc3d88fc91eaa27596d28421f3012cfea6e"} Dec 10 11:08:01 crc kubenswrapper[4682]: I1210 11:08:01.774638 4682 generic.go:334] "Generic (PLEG): container finished" podID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerID="f7d29c4e4534990c2c1ebf08c35f64ff5483a7bfbbc4ae031f298eda3e8928c8" exitCode=0 Dec 10 11:08:01 crc kubenswrapper[4682]: I1210 11:08:01.775634 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"97c96a0f-0978-472b-b04a-6b1f0850b97c","Type":"ContainerDied","Data":"f7d29c4e4534990c2c1ebf08c35f64ff5483a7bfbbc4ae031f298eda3e8928c8"} Dec 10 11:08:01 crc kubenswrapper[4682]: I1210 11:08:01.998169 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.033086 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=7.033063196 podStartE2EDuration="7.033063196s" podCreationTimestamp="2025-12-10 11:07:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:08:00.808052106 +0000 UTC m=+1361.128262876" watchObservedRunningTime="2025-12-10 11:08:02.033063196 +0000 UTC m=+1362.353273956" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.056310 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-logs\") pod \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.056515 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nl4h6\" (UniqueName: \"kubernetes.io/projected/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-kube-api-access-nl4h6\") pod \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.056551 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-config-data\") pod \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.056586 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-config-data-custom\") pod \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.056623 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-combined-ca-bundle\") pod \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\" (UID: \"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6\") " Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.057009 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-logs" (OuterVolumeSpecName: "logs") pod "49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" (UID: "49a06892-4c0a-4fa8-8703-cdb84d3ff4d6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.057202 4682 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.067914 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-kube-api-access-nl4h6" (OuterVolumeSpecName: "kube-api-access-nl4h6") pod "49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" (UID: "49a06892-4c0a-4fa8-8703-cdb84d3ff4d6"). InnerVolumeSpecName "kube-api-access-nl4h6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.069450 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" (UID: "49a06892-4c0a-4fa8-8703-cdb84d3ff4d6"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.100263 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" (UID: "49a06892-4c0a-4fa8-8703-cdb84d3ff4d6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.137547 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.159484 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nl4h6\" (UniqueName: \"kubernetes.io/projected/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-kube-api-access-nl4h6\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.159509 4682 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.159518 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.166059 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-config-data" (OuterVolumeSpecName: "config-data") pod "49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" (UID: "49a06892-4c0a-4fa8-8703-cdb84d3ff4d6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.260978 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/97c96a0f-0978-472b-b04a-6b1f0850b97c-run-httpd\") pod \"97c96a0f-0978-472b-b04a-6b1f0850b97c\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.261308 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/97c96a0f-0978-472b-b04a-6b1f0850b97c-log-httpd\") pod \"97c96a0f-0978-472b-b04a-6b1f0850b97c\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.261428 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97c96a0f-0978-472b-b04a-6b1f0850b97c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "97c96a0f-0978-472b-b04a-6b1f0850b97c" (UID: "97c96a0f-0978-472b-b04a-6b1f0850b97c"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.261464 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-combined-ca-bundle\") pod \"97c96a0f-0978-472b-b04a-6b1f0850b97c\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.261528 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-scripts\") pod \"97c96a0f-0978-472b-b04a-6b1f0850b97c\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.261550 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twnj7\" (UniqueName: \"kubernetes.io/projected/97c96a0f-0978-472b-b04a-6b1f0850b97c-kube-api-access-twnj7\") pod \"97c96a0f-0978-472b-b04a-6b1f0850b97c\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.261668 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97c96a0f-0978-472b-b04a-6b1f0850b97c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "97c96a0f-0978-472b-b04a-6b1f0850b97c" (UID: "97c96a0f-0978-472b-b04a-6b1f0850b97c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.261702 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-config-data\") pod \"97c96a0f-0978-472b-b04a-6b1f0850b97c\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.261741 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-sg-core-conf-yaml\") pod \"97c96a0f-0978-472b-b04a-6b1f0850b97c\" (UID: \"97c96a0f-0978-472b-b04a-6b1f0850b97c\") " Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.262284 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.262305 4682 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/97c96a0f-0978-472b-b04a-6b1f0850b97c-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.262316 4682 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/97c96a0f-0978-472b-b04a-6b1f0850b97c-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.264705 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97c96a0f-0978-472b-b04a-6b1f0850b97c-kube-api-access-twnj7" (OuterVolumeSpecName: "kube-api-access-twnj7") pod "97c96a0f-0978-472b-b04a-6b1f0850b97c" (UID: "97c96a0f-0978-472b-b04a-6b1f0850b97c"). InnerVolumeSpecName "kube-api-access-twnj7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.266501 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-scripts" (OuterVolumeSpecName: "scripts") pod "97c96a0f-0978-472b-b04a-6b1f0850b97c" (UID: "97c96a0f-0978-472b-b04a-6b1f0850b97c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.286695 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "97c96a0f-0978-472b-b04a-6b1f0850b97c" (UID: "97c96a0f-0978-472b-b04a-6b1f0850b97c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.352858 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97c96a0f-0978-472b-b04a-6b1f0850b97c" (UID: "97c96a0f-0978-472b-b04a-6b1f0850b97c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.366830 4682 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.366865 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.366875 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.366885 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-twnj7\" (UniqueName: \"kubernetes.io/projected/97c96a0f-0978-472b-b04a-6b1f0850b97c-kube-api-access-twnj7\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.370709 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-config-data" (OuterVolumeSpecName: "config-data") pod "97c96a0f-0978-472b-b04a-6b1f0850b97c" (UID: "97c96a0f-0978-472b-b04a-6b1f0850b97c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.469295 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c96a0f-0978-472b-b04a-6b1f0850b97c-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.787015 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.787022 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7b5d4b7b66-h8tzl" event={"ID":"49a06892-4c0a-4fa8-8703-cdb84d3ff4d6","Type":"ContainerDied","Data":"77cab110191949cca7f1063e7ad4247b2c07f3aaedb22d74ee2c3cdc0d3cd654"} Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.787894 4682 scope.go:117] "RemoveContainer" containerID="40094886e3e24b9d038d32b1037b0dc3d88fc91eaa27596d28421f3012cfea6e" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.790262 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"97c96a0f-0978-472b-b04a-6b1f0850b97c","Type":"ContainerDied","Data":"0a50aae77d9c2c4e19b20e145e9cf51614a96914069846326cf9ffa10c59608b"} Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.790461 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.794280 4682 generic.go:334] "Generic (PLEG): container finished" podID="33627f7b-af0a-495f-b5cb-ed10c47ed17d" containerID="e73667f467359e8c2f47d2b5b24aea120bcdc4334e2d094517c6b2c67b3d7f91" exitCode=0 Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.794687 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-storageinit-bfwj8" event={"ID":"33627f7b-af0a-495f-b5cb-ed10c47ed17d","Type":"ContainerDied","Data":"e73667f467359e8c2f47d2b5b24aea120bcdc4334e2d094517c6b2c67b3d7f91"} Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.831542 4682 scope.go:117] "RemoveContainer" containerID="fc347e3b3bc31dcf54cec7aca8d90c8d11ce0b8b41b9e39234e5678f80df584f" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.844578 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7b5d4b7b66-h8tzl"] Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.869541 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-7b5d4b7b66-h8tzl"] Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.883139 4682 scope.go:117] "RemoveContainer" containerID="71c739263f5eccd7f700003a2b9b310844ec1f7055392bf333e1e401d865c0e6" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.898396 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.909647 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.927480 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:02 crc kubenswrapper[4682]: E1210 11:08:02.928068 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="ceilometer-central-agent" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.928088 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="ceilometer-central-agent" Dec 10 11:08:02 crc kubenswrapper[4682]: E1210 11:08:02.928098 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="ceilometer-notification-agent" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.928105 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" 
containerName="ceilometer-notification-agent" Dec 10 11:08:02 crc kubenswrapper[4682]: E1210 11:08:02.928121 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="proxy-httpd" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.928127 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="proxy-httpd" Dec 10 11:08:02 crc kubenswrapper[4682]: E1210 11:08:02.928159 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="sg-core" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.928166 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="sg-core" Dec 10 11:08:02 crc kubenswrapper[4682]: E1210 11:08:02.928174 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" containerName="barbican-api-log" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.928180 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" containerName="barbican-api-log" Dec 10 11:08:02 crc kubenswrapper[4682]: E1210 11:08:02.928199 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" containerName="barbican-api" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.928204 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" containerName="barbican-api" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.928387 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="sg-core" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.928410 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="ceilometer-central-agent" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.928422 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" containerName="barbican-api" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.928434 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" containerName="barbican-api-log" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.928444 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="ceilometer-notification-agent" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.928457 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" containerName="proxy-httpd" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.930425 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.933983 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.934255 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.960679 4682 scope.go:117] "RemoveContainer" containerID="214b1a88f0e22226c669b12ca77adfb8e5040a5885d9ec8d6c54c4d7f8e575b3" Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.966770 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:02 crc kubenswrapper[4682]: I1210 11:08:02.996685 4682 scope.go:117] "RemoveContainer" containerID="f7d29c4e4534990c2c1ebf08c35f64ff5483a7bfbbc4ae031f298eda3e8928c8" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.022779 4682 scope.go:117] "RemoveContainer" containerID="f8aab96b0f7d4ac6c1f8c39574398e911e23efeeaffa56928b0213e6d14e0cea" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.081788 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-run-httpd\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.081829 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-config-data\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.081912 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-log-httpd\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.081966 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.082027 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.082061 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvb25\" (UniqueName: \"kubernetes.io/projected/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-kube-api-access-dvb25\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.082089 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-scripts\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.184065 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-run-httpd\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.184108 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-config-data\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.184152 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-log-httpd\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.184197 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.184255 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.184288 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvb25\" (UniqueName: \"kubernetes.io/projected/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-kube-api-access-dvb25\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.184315 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-scripts\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.185301 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-log-httpd\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.185454 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-run-httpd\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.189228 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-scripts\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.190136 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-config-data\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.197846 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.202218 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.206856 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvb25\" (UniqueName: \"kubernetes.io/projected/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-kube-api-access-dvb25\") pod \"ceilometer-0\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.270311 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.785746 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:03 crc kubenswrapper[4682]: I1210 11:08:03.804875 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54a0138b-bd8c-4f9c-8858-7c8b41798e5e","Type":"ContainerStarted","Data":"f03059924a6ae4f29e669101061d0208b57b8471858d68c82cfe28daae818092"} Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.167379 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.307796 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-config-data\") pod \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.308144 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-scripts\") pod \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.308287 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4v89\" (UniqueName: \"kubernetes.io/projected/33627f7b-af0a-495f-b5cb-ed10c47ed17d-kube-api-access-l4v89\") pod \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.308362 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/33627f7b-af0a-495f-b5cb-ed10c47ed17d-certs\") pod \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.308446 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-combined-ca-bundle\") pod \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\" (UID: \"33627f7b-af0a-495f-b5cb-ed10c47ed17d\") " Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.313404 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33627f7b-af0a-495f-b5cb-ed10c47ed17d-kube-api-access-l4v89" (OuterVolumeSpecName: "kube-api-access-l4v89") pod "33627f7b-af0a-495f-b5cb-ed10c47ed17d" (UID: "33627f7b-af0a-495f-b5cb-ed10c47ed17d"). InnerVolumeSpecName "kube-api-access-l4v89". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.313426 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-scripts" (OuterVolumeSpecName: "scripts") pod "33627f7b-af0a-495f-b5cb-ed10c47ed17d" (UID: "33627f7b-af0a-495f-b5cb-ed10c47ed17d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.316102 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33627f7b-af0a-495f-b5cb-ed10c47ed17d-certs" (OuterVolumeSpecName: "certs") pod "33627f7b-af0a-495f-b5cb-ed10c47ed17d" (UID: "33627f7b-af0a-495f-b5cb-ed10c47ed17d"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.339731 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "33627f7b-af0a-495f-b5cb-ed10c47ed17d" (UID: "33627f7b-af0a-495f-b5cb-ed10c47ed17d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.341879 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-config-data" (OuterVolumeSpecName: "config-data") pod "33627f7b-af0a-495f-b5cb-ed10c47ed17d" (UID: "33627f7b-af0a-495f-b5cb-ed10c47ed17d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.393143 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49a06892-4c0a-4fa8-8703-cdb84d3ff4d6" path="/var/lib/kubelet/pods/49a06892-4c0a-4fa8-8703-cdb84d3ff4d6/volumes" Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.394331 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97c96a0f-0978-472b-b04a-6b1f0850b97c" path="/var/lib/kubelet/pods/97c96a0f-0978-472b-b04a-6b1f0850b97c/volumes" Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.411428 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.411484 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4v89\" (UniqueName: \"kubernetes.io/projected/33627f7b-af0a-495f-b5cb-ed10c47ed17d-kube-api-access-l4v89\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.411499 4682 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/projected/33627f7b-af0a-495f-b5cb-ed10c47ed17d-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.411511 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.411523 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33627f7b-af0a-495f-b5cb-ed10c47ed17d-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.818158 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-storageinit-bfwj8" event={"ID":"33627f7b-af0a-495f-b5cb-ed10c47ed17d","Type":"ContainerDied","Data":"25be977357baa13fcff0d451e75a8e858e05ed06cec4bd4384a9b207d1f9f473"} Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.818204 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25be977357baa13fcff0d451e75a8e858e05ed06cec4bd4384a9b207d1f9f473" Dec 10 11:08:04 crc kubenswrapper[4682]: I1210 11:08:04.818231 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-storageinit-bfwj8" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.066749 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 10 11:08:05 crc kubenswrapper[4682]: E1210 11:08:05.067304 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33627f7b-af0a-495f-b5cb-ed10c47ed17d" containerName="cloudkitty-storageinit" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.067320 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="33627f7b-af0a-495f-b5cb-ed10c47ed17d" containerName="cloudkitty-storageinit" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.067523 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="33627f7b-af0a-495f-b5cb-ed10c47ed17d" containerName="cloudkitty-storageinit" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.068211 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.072259 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-scripts" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.072598 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cloudkitty-client-internal" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.072751 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-cloudkitty-dockercfg-6svbf" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.072870 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-config-data" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.073429 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-proc-config-data" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.092620 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.132950 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86d9875b97-kxg7x"] Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.134455 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.224214 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86d9875b97-kxg7x"] Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.233528 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-scripts\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.233580 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-ovsdbserver-nb\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.233606 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwlnp\" (UniqueName: \"kubernetes.io/projected/21cd26d9-3c93-42a2-b33a-c6c1a532806c-kube-api-access-qwlnp\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.233629 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-dns-swift-storage-0\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.233762 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-combined-ca-bundle\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.233787 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-dns-svc\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.233825 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-ovsdbserver-sb\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.233896 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-config\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.233924 4682 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxbkg\" (UniqueName: \"kubernetes.io/projected/53824719-3472-4d94-be91-5a1f3176e34d-kube-api-access-sxbkg\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.233947 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-config-data-custom\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.233996 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-config-data\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.234016 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/21cd26d9-3c93-42a2-b33a-c6c1a532806c-certs\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.286829 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-api-0"] Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.290437 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.301916 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-api-config-data" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.335935 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-config\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.335996 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxbkg\" (UniqueName: \"kubernetes.io/projected/53824719-3472-4d94-be91-5a1f3176e34d-kube-api-access-sxbkg\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.336022 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-config-data-custom\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.336072 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-config-data\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.336094 4682 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/21cd26d9-3c93-42a2-b33a-c6c1a532806c-certs\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.336148 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-scripts\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.336177 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-ovsdbserver-nb\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.336198 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwlnp\" (UniqueName: \"kubernetes.io/projected/21cd26d9-3c93-42a2-b33a-c6c1a532806c-kube-api-access-qwlnp\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.336219 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-dns-swift-storage-0\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.336282 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-combined-ca-bundle\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.336299 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-dns-svc\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.336323 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-ovsdbserver-sb\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.342278 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-config\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.344157 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-dns-svc\") pod 
\"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.342278 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-dns-swift-storage-0\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.342292 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-ovsdbserver-nb\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.349113 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-config-data-custom\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.349805 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/projected/21cd26d9-3c93-42a2-b33a-c6c1a532806c-certs\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.353046 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-api-0"] Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.356658 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-config-data\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.362019 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-scripts\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.365730 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-ovsdbserver-sb\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.367360 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwlnp\" (UniqueName: \"kubernetes.io/projected/21cd26d9-3c93-42a2-b33a-c6c1a532806c-kube-api-access-qwlnp\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.372556 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxbkg\" (UniqueName: \"kubernetes.io/projected/53824719-3472-4d94-be91-5a1f3176e34d-kube-api-access-sxbkg\") pod \"dnsmasq-dns-86d9875b97-kxg7x\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " 
pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.372601 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-combined-ca-bundle\") pod \"cloudkitty-proc-0\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.391638 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-proc-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.438258 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-combined-ca-bundle\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.446184 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c6da2446-428e-4216-a1b3-9d686ccfce42-logs\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.447760 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-config-data-custom\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.447882 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzfrs\" (UniqueName: \"kubernetes.io/projected/c6da2446-428e-4216-a1b3-9d686ccfce42-kube-api-access-lzfrs\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.447917 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-config-data\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.448074 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/c6da2446-428e-4216-a1b3-9d686ccfce42-certs\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.448136 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-scripts\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.485044 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.550360 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-combined-ca-bundle\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.550402 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c6da2446-428e-4216-a1b3-9d686ccfce42-logs\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.550488 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-config-data-custom\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.550525 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzfrs\" (UniqueName: \"kubernetes.io/projected/c6da2446-428e-4216-a1b3-9d686ccfce42-kube-api-access-lzfrs\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.550540 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-config-data\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.550598 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/c6da2446-428e-4216-a1b3-9d686ccfce42-certs\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.550626 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-scripts\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.552774 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c6da2446-428e-4216-a1b3-9d686ccfce42-logs\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.558655 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/projected/c6da2446-428e-4216-a1b3-9d686ccfce42-certs\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.559404 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-config-data-custom\") pod \"cloudkitty-api-0\" (UID: 
\"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.571255 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-scripts\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.573238 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-combined-ca-bundle\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.574790 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzfrs\" (UniqueName: \"kubernetes.io/projected/c6da2446-428e-4216-a1b3-9d686ccfce42-kube-api-access-lzfrs\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.587461 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-config-data\") pod \"cloudkitty-api-0\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.645948 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-api-0" Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.843773 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54a0138b-bd8c-4f9c-8858-7c8b41798e5e","Type":"ContainerStarted","Data":"744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69"} Dec 10 11:08:05 crc kubenswrapper[4682]: I1210 11:08:05.951575 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 10 11:08:06 crc kubenswrapper[4682]: I1210 11:08:06.132506 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86d9875b97-kxg7x"] Dec 10 11:08:06 crc kubenswrapper[4682]: I1210 11:08:06.268794 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-api-0"] Dec 10 11:08:06 crc kubenswrapper[4682]: W1210 11:08:06.270170 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc6da2446_428e_4216_a1b3_9d686ccfce42.slice/crio-6a7ce02375504122b1a35a8d2fe4564a0e52d352da8673b583361b961416c346 WatchSource:0}: Error finding container 6a7ce02375504122b1a35a8d2fe4564a0e52d352da8673b583361b961416c346: Status 404 returned error can't find the container with id 6a7ce02375504122b1a35a8d2fe4564a0e52d352da8673b583361b961416c346 Dec 10 11:08:06 crc kubenswrapper[4682]: I1210 11:08:06.478811 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:08:06 crc kubenswrapper[4682]: I1210 11:08:06.479190 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" 
podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:08:06 crc kubenswrapper[4682]: I1210 11:08:06.663568 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 10 11:08:06 crc kubenswrapper[4682]: I1210 11:08:06.890807 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"c6da2446-428e-4216-a1b3-9d686ccfce42","Type":"ContainerStarted","Data":"de6967c48fe62cfc373713ce1d166697ae57d1f1c2c66460badf6dec0f8af141"} Dec 10 11:08:06 crc kubenswrapper[4682]: I1210 11:08:06.891163 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"c6da2446-428e-4216-a1b3-9d686ccfce42","Type":"ContainerStarted","Data":"6a7ce02375504122b1a35a8d2fe4564a0e52d352da8673b583361b961416c346"} Dec 10 11:08:06 crc kubenswrapper[4682]: I1210 11:08:06.908971 4682 generic.go:334] "Generic (PLEG): container finished" podID="53824719-3472-4d94-be91-5a1f3176e34d" containerID="339c9d985c738cfba338b2d80c216ff6ee2eb2eea81db6d073a77a17ec310825" exitCode=0 Dec 10 11:08:06 crc kubenswrapper[4682]: I1210 11:08:06.909071 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" event={"ID":"53824719-3472-4d94-be91-5a1f3176e34d","Type":"ContainerDied","Data":"339c9d985c738cfba338b2d80c216ff6ee2eb2eea81db6d073a77a17ec310825"} Dec 10 11:08:06 crc kubenswrapper[4682]: I1210 11:08:06.909098 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" event={"ID":"53824719-3472-4d94-be91-5a1f3176e34d","Type":"ContainerStarted","Data":"a55b683228cbbc86462e0a9911160fd91c6c4b8d0838effa934193820bf9a00f"} Dec 10 11:08:06 crc kubenswrapper[4682]: I1210 11:08:06.921456 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54a0138b-bd8c-4f9c-8858-7c8b41798e5e","Type":"ContainerStarted","Data":"9aa6f100770714cb3234ae09f213e08b77a4809ebf4e3681e69ffd0f26a7bdae"} Dec 10 11:08:06 crc kubenswrapper[4682]: I1210 11:08:06.923040 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-proc-0" event={"ID":"21cd26d9-3c93-42a2-b33a-c6c1a532806c","Type":"ContainerStarted","Data":"b9d3f80587ff5405f092d905224a99a0532bb30d8af8f12c9b5579ace91ec2dd"} Dec 10 11:08:07 crc kubenswrapper[4682]: I1210 11:08:07.947496 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"c6da2446-428e-4216-a1b3-9d686ccfce42","Type":"ContainerStarted","Data":"bf5d6e180ab34714450401186af661b8196f0e97de99136fd332ff7010b948e0"} Dec 10 11:08:07 crc kubenswrapper[4682]: I1210 11:08:07.947844 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-api-0" Dec 10 11:08:07 crc kubenswrapper[4682]: I1210 11:08:07.953512 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:07 crc kubenswrapper[4682]: I1210 11:08:07.955145 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54a0138b-bd8c-4f9c-8858-7c8b41798e5e","Type":"ContainerStarted","Data":"2ba55ec7dd08cde9cc72578c20b49b3ae6932b7f2b97148421fd651710efdd98"} Dec 10 11:08:07 crc kubenswrapper[4682]: I1210 11:08:07.978033 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/cloudkitty-api-0" podStartSLOduration=2.978012732 podStartE2EDuration="2.978012732s" podCreationTimestamp="2025-12-10 11:08:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:08:07.967729824 +0000 UTC m=+1368.287940594" watchObservedRunningTime="2025-12-10 11:08:07.978012732 +0000 UTC m=+1368.298223482" Dec 10 11:08:07 crc kubenswrapper[4682]: I1210 11:08:07.999567 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" podStartSLOduration=2.999546046 podStartE2EDuration="2.999546046s" podCreationTimestamp="2025-12-10 11:08:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:08:07.989565837 +0000 UTC m=+1368.309776607" watchObservedRunningTime="2025-12-10 11:08:07.999546046 +0000 UTC m=+1368.319756796" Dec 10 11:08:08 crc kubenswrapper[4682]: I1210 11:08:08.017420 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-proc-0" podStartSLOduration=1.4039506990000001 podStartE2EDuration="3.017401631s" podCreationTimestamp="2025-12-10 11:08:05 +0000 UTC" firstStartedPulling="2025-12-10 11:08:06.006862015 +0000 UTC m=+1366.327072765" lastFinishedPulling="2025-12-10 11:08:07.620312947 +0000 UTC m=+1367.940523697" observedRunningTime="2025-12-10 11:08:08.008248417 +0000 UTC m=+1368.328459177" watchObservedRunningTime="2025-12-10 11:08:08.017401631 +0000 UTC m=+1368.337612371" Dec 10 11:08:08 crc kubenswrapper[4682]: I1210 11:08:08.441332 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 10 11:08:08 crc kubenswrapper[4682]: I1210 11:08:08.484674 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-api-0"] Dec 10 11:08:08 crc kubenswrapper[4682]: I1210 11:08:08.896025 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:08:08 crc kubenswrapper[4682]: I1210 11:08:08.965656 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-proc-0" event={"ID":"21cd26d9-3c93-42a2-b33a-c6c1a532806c","Type":"ContainerStarted","Data":"309f22141cec94b11a3b3c213ab0c7374559670c21a09884128a4b22676026ef"} Dec 10 11:08:08 crc kubenswrapper[4682]: I1210 11:08:08.965743 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cloudkitty-proc-0" podUID="21cd26d9-3c93-42a2-b33a-c6c1a532806c" containerName="cloudkitty-proc" containerID="cri-o://309f22141cec94b11a3b3c213ab0c7374559670c21a09884128a4b22676026ef" gracePeriod=30 Dec 10 11:08:08 crc kubenswrapper[4682]: I1210 11:08:08.977959 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" event={"ID":"53824719-3472-4d94-be91-5a1f3176e34d","Type":"ContainerStarted","Data":"e383d8a5d9567fcd4199ed7af6e99535d846658fb64dccc90e724e043afa5373"} Dec 10 11:08:09 crc kubenswrapper[4682]: I1210 11:08:09.881374 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6548f86b64-snz6f" Dec 10 11:08:10 crc kubenswrapper[4682]: I1210 11:08:10.003661 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cloudkitty-api-0" podUID="c6da2446-428e-4216-a1b3-9d686ccfce42" containerName="cloudkitty-api-log" 
containerID="cri-o://de6967c48fe62cfc373713ce1d166697ae57d1f1c2c66460badf6dec0f8af141" gracePeriod=30 Dec 10 11:08:10 crc kubenswrapper[4682]: I1210 11:08:10.004103 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cloudkitty-api-0" podUID="c6da2446-428e-4216-a1b3-9d686ccfce42" containerName="cloudkitty-api" containerID="cri-o://bf5d6e180ab34714450401186af661b8196f0e97de99136fd332ff7010b948e0" gracePeriod=30 Dec 10 11:08:10 crc kubenswrapper[4682]: I1210 11:08:10.997953 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-548d5df8d4-8fcdl" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.076768 4682 generic.go:334] "Generic (PLEG): container finished" podID="c6da2446-428e-4216-a1b3-9d686ccfce42" containerID="bf5d6e180ab34714450401186af661b8196f0e97de99136fd332ff7010b948e0" exitCode=0 Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.076805 4682 generic.go:334] "Generic (PLEG): container finished" podID="c6da2446-428e-4216-a1b3-9d686ccfce42" containerID="de6967c48fe62cfc373713ce1d166697ae57d1f1c2c66460badf6dec0f8af141" exitCode=143 Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.076880 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"c6da2446-428e-4216-a1b3-9d686ccfce42","Type":"ContainerDied","Data":"bf5d6e180ab34714450401186af661b8196f0e97de99136fd332ff7010b948e0"} Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.076906 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"c6da2446-428e-4216-a1b3-9d686ccfce42","Type":"ContainerDied","Data":"de6967c48fe62cfc373713ce1d166697ae57d1f1c2c66460badf6dec0f8af141"} Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.108939 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54a0138b-bd8c-4f9c-8858-7c8b41798e5e","Type":"ContainerStarted","Data":"5b25436f00e4762c3f4f0128d86288673352db85a3a7ded260af6cac16b47c15"} Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.110295 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.136629 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.105175441 podStartE2EDuration="9.136603064s" podCreationTimestamp="2025-12-10 11:08:02 +0000 UTC" firstStartedPulling="2025-12-10 11:08:03.784329194 +0000 UTC m=+1364.104539944" lastFinishedPulling="2025-12-10 11:08:09.815756817 +0000 UTC m=+1370.135967567" observedRunningTime="2025-12-10 11:08:11.126493452 +0000 UTC m=+1371.446704212" watchObservedRunningTime="2025-12-10 11:08:11.136603064 +0000 UTC m=+1371.456813824" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.280236 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-api-0" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.420141 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/c6da2446-428e-4216-a1b3-9d686ccfce42-certs\") pod \"c6da2446-428e-4216-a1b3-9d686ccfce42\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.420580 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzfrs\" (UniqueName: \"kubernetes.io/projected/c6da2446-428e-4216-a1b3-9d686ccfce42-kube-api-access-lzfrs\") pod \"c6da2446-428e-4216-a1b3-9d686ccfce42\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.420638 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-config-data-custom\") pod \"c6da2446-428e-4216-a1b3-9d686ccfce42\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.420664 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c6da2446-428e-4216-a1b3-9d686ccfce42-logs\") pod \"c6da2446-428e-4216-a1b3-9d686ccfce42\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.420709 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-combined-ca-bundle\") pod \"c6da2446-428e-4216-a1b3-9d686ccfce42\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.420753 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-config-data\") pod \"c6da2446-428e-4216-a1b3-9d686ccfce42\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.420835 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-scripts\") pod \"c6da2446-428e-4216-a1b3-9d686ccfce42\" (UID: \"c6da2446-428e-4216-a1b3-9d686ccfce42\") " Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.422036 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6da2446-428e-4216-a1b3-9d686ccfce42-logs" (OuterVolumeSpecName: "logs") pod "c6da2446-428e-4216-a1b3-9d686ccfce42" (UID: "c6da2446-428e-4216-a1b3-9d686ccfce42"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.427661 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6da2446-428e-4216-a1b3-9d686ccfce42-kube-api-access-lzfrs" (OuterVolumeSpecName: "kube-api-access-lzfrs") pod "c6da2446-428e-4216-a1b3-9d686ccfce42" (UID: "c6da2446-428e-4216-a1b3-9d686ccfce42"). InnerVolumeSpecName "kube-api-access-lzfrs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.431218 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6da2446-428e-4216-a1b3-9d686ccfce42-certs" (OuterVolumeSpecName: "certs") pod "c6da2446-428e-4216-a1b3-9d686ccfce42" (UID: "c6da2446-428e-4216-a1b3-9d686ccfce42"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.431609 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c6da2446-428e-4216-a1b3-9d686ccfce42" (UID: "c6da2446-428e-4216-a1b3-9d686ccfce42"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.459217 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-scripts" (OuterVolumeSpecName: "scripts") pod "c6da2446-428e-4216-a1b3-9d686ccfce42" (UID: "c6da2446-428e-4216-a1b3-9d686ccfce42"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.464598 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-config-data" (OuterVolumeSpecName: "config-data") pod "c6da2446-428e-4216-a1b3-9d686ccfce42" (UID: "c6da2446-428e-4216-a1b3-9d686ccfce42"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.500608 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c6da2446-428e-4216-a1b3-9d686ccfce42" (UID: "c6da2446-428e-4216-a1b3-9d686ccfce42"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.525905 4682 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/projected/c6da2446-428e-4216-a1b3-9d686ccfce42-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.525942 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzfrs\" (UniqueName: \"kubernetes.io/projected/c6da2446-428e-4216-a1b3-9d686ccfce42-kube-api-access-lzfrs\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.525959 4682 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.525969 4682 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c6da2446-428e-4216-a1b3-9d686ccfce42-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.525979 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.525989 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:11 crc kubenswrapper[4682]: I1210 11:08:11.525999 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6da2446-428e-4216-a1b3-9d686ccfce42-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.120575 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.130281 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"c6da2446-428e-4216-a1b3-9d686ccfce42","Type":"ContainerDied","Data":"6a7ce02375504122b1a35a8d2fe4564a0e52d352da8673b583361b961416c346"} Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.130341 4682 scope.go:117] "RemoveContainer" containerID="bf5d6e180ab34714450401186af661b8196f0e97de99136fd332ff7010b948e0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.196744 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-api-0"] Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.209799 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cloudkitty-api-0"] Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.267168 4682 scope.go:117] "RemoveContainer" containerID="de6967c48fe62cfc373713ce1d166697ae57d1f1c2c66460badf6dec0f8af141" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.271526 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-api-0"] Dec 10 11:08:12 crc kubenswrapper[4682]: E1210 11:08:12.271979 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6da2446-428e-4216-a1b3-9d686ccfce42" containerName="cloudkitty-api" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.271991 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6da2446-428e-4216-a1b3-9d686ccfce42" containerName="cloudkitty-api" Dec 10 11:08:12 crc kubenswrapper[4682]: E1210 11:08:12.272016 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6da2446-428e-4216-a1b3-9d686ccfce42" containerName="cloudkitty-api-log" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.272022 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6da2446-428e-4216-a1b3-9d686ccfce42" containerName="cloudkitty-api-log" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.272210 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6da2446-428e-4216-a1b3-9d686ccfce42" containerName="cloudkitty-api" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.272230 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6da2446-428e-4216-a1b3-9d686ccfce42" containerName="cloudkitty-api-log" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.273620 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.276935 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-api-config-data" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.277245 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cloudkitty-internal-svc" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.281213 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cloudkitty-public-svc" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.310524 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-api-0"] Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.359088 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-config-data-custom\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.359767 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfjvc\" (UniqueName: \"kubernetes.io/projected/c414c980-13a0-4869-b74e-f9352e92e527-kube-api-access-kfjvc\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.359963 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-config-data\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.360096 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c414c980-13a0-4869-b74e-f9352e92e527-logs\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.360287 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-scripts\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.360436 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-internal-tls-certs\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.360613 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-public-tls-certs\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.360722 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-combined-ca-bundle\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.360828 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/c414c980-13a0-4869-b74e-f9352e92e527-certs\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.392005 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6da2446-428e-4216-a1b3-9d686ccfce42" path="/var/lib/kubelet/pods/c6da2446-428e-4216-a1b3-9d686ccfce42/volumes" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.462788 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/c414c980-13a0-4869-b74e-f9352e92e527-certs\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.462944 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-config-data-custom\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.462995 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfjvc\" (UniqueName: \"kubernetes.io/projected/c414c980-13a0-4869-b74e-f9352e92e527-kube-api-access-kfjvc\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.463051 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-config-data\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.463099 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c414c980-13a0-4869-b74e-f9352e92e527-logs\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.463185 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-scripts\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.463226 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-internal-tls-certs\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.463280 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-public-tls-certs\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.463312 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-combined-ca-bundle\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.464816 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c414c980-13a0-4869-b74e-f9352e92e527-logs\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.468843 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-config-data\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.469129 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-scripts\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.469310 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-internal-tls-certs\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.469962 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-combined-ca-bundle\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.474418 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-public-tls-certs\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.475031 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c414c980-13a0-4869-b74e-f9352e92e527-config-data-custom\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.479631 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/projected/c414c980-13a0-4869-b74e-f9352e92e527-certs\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.483908 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfjvc\" (UniqueName: 
\"kubernetes.io/projected/c414c980-13a0-4869-b74e-f9352e92e527-kube-api-access-kfjvc\") pod \"cloudkitty-api-0\" (UID: \"c414c980-13a0-4869-b74e-f9352e92e527\") " pod="openstack/cloudkitty-api-0" Dec 10 11:08:12 crc kubenswrapper[4682]: I1210 11:08:12.617333 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-api-0" Dec 10 11:08:13 crc kubenswrapper[4682]: I1210 11:08:13.182538 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-api-0"] Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.148188 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"c414c980-13a0-4869-b74e-f9352e92e527","Type":"ContainerStarted","Data":"56af09d3bf95c5087bdca5b363e81fda97ba5c15231bee19e1c385622544d515"} Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.148768 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"c414c980-13a0-4869-b74e-f9352e92e527","Type":"ContainerStarted","Data":"d85e509156b600415eab89cf0ddc9f5281f1c63b582f227a0be1889764bc3655"} Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.148784 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-api-0" event={"ID":"c414c980-13a0-4869-b74e-f9352e92e527","Type":"ContainerStarted","Data":"d1ad4b9a084d56e3b81138878c5848be9b7ad8f44976923879d8b9e7aa7f1fae"} Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.148813 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cloudkitty-api-0" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.194221 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-api-0" podStartSLOduration=2.194200224 podStartE2EDuration="2.194200224s" podCreationTimestamp="2025-12-10 11:08:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:08:14.184121253 +0000 UTC m=+1374.504332023" watchObservedRunningTime="2025-12-10 11:08:14.194200224 +0000 UTC m=+1374.514410974" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.549184 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.550755 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.560904 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.560929 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.561145 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.561532 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-5vv8v" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.718330 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6073de7e-e347-4fb0-b607-21aaf92384b1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6073de7e-e347-4fb0-b607-21aaf92384b1\") " pod="openstack/openstackclient" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.718648 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6073de7e-e347-4fb0-b607-21aaf92384b1-openstack-config-secret\") pod \"openstackclient\" (UID: \"6073de7e-e347-4fb0-b607-21aaf92384b1\") " pod="openstack/openstackclient" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.718709 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6073de7e-e347-4fb0-b607-21aaf92384b1-openstack-config\") pod \"openstackclient\" (UID: \"6073de7e-e347-4fb0-b607-21aaf92384b1\") " pod="openstack/openstackclient" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.718748 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svkpk\" (UniqueName: \"kubernetes.io/projected/6073de7e-e347-4fb0-b607-21aaf92384b1-kube-api-access-svkpk\") pod \"openstackclient\" (UID: \"6073de7e-e347-4fb0-b607-21aaf92384b1\") " pod="openstack/openstackclient" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.820896 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6073de7e-e347-4fb0-b607-21aaf92384b1-openstack-config-secret\") pod \"openstackclient\" (UID: \"6073de7e-e347-4fb0-b607-21aaf92384b1\") " pod="openstack/openstackclient" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.820993 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6073de7e-e347-4fb0-b607-21aaf92384b1-openstack-config\") pod \"openstackclient\" (UID: \"6073de7e-e347-4fb0-b607-21aaf92384b1\") " pod="openstack/openstackclient" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.821040 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svkpk\" (UniqueName: \"kubernetes.io/projected/6073de7e-e347-4fb0-b607-21aaf92384b1-kube-api-access-svkpk\") pod \"openstackclient\" (UID: \"6073de7e-e347-4fb0-b607-21aaf92384b1\") " pod="openstack/openstackclient" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.821108 4682 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6073de7e-e347-4fb0-b607-21aaf92384b1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6073de7e-e347-4fb0-b607-21aaf92384b1\") " pod="openstack/openstackclient" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.821939 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6073de7e-e347-4fb0-b607-21aaf92384b1-openstack-config\") pod \"openstackclient\" (UID: \"6073de7e-e347-4fb0-b607-21aaf92384b1\") " pod="openstack/openstackclient" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.826442 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6073de7e-e347-4fb0-b607-21aaf92384b1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6073de7e-e347-4fb0-b607-21aaf92384b1\") " pod="openstack/openstackclient" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.826444 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6073de7e-e347-4fb0-b607-21aaf92384b1-openstack-config-secret\") pod \"openstackclient\" (UID: \"6073de7e-e347-4fb0-b607-21aaf92384b1\") " pod="openstack/openstackclient" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.839857 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svkpk\" (UniqueName: \"kubernetes.io/projected/6073de7e-e347-4fb0-b607-21aaf92384b1-kube-api-access-svkpk\") pod \"openstackclient\" (UID: \"6073de7e-e347-4fb0-b607-21aaf92384b1\") " pod="openstack/openstackclient" Dec 10 11:08:14 crc kubenswrapper[4682]: I1210 11:08:14.886933 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 10 11:08:15 crc kubenswrapper[4682]: I1210 11:08:15.472269 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 10 11:08:15 crc kubenswrapper[4682]: I1210 11:08:15.486671 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:08:15 crc kubenswrapper[4682]: I1210 11:08:15.541966 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-wkcwn"] Dec 10 11:08:15 crc kubenswrapper[4682]: I1210 11:08:15.542184 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" podUID="9a8833f9-3cd6-4100-acae-847c61f5a6ed" containerName="dnsmasq-dns" containerID="cri-o://9dec16ffe5fd4cfc4d50a403ed53d43d632bbb69203b092f2eacdc07ea15e50c" gracePeriod=10 Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.106974 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.228061 4682 generic.go:334] "Generic (PLEG): container finished" podID="9a8833f9-3cd6-4100-acae-847c61f5a6ed" containerID="9dec16ffe5fd4cfc4d50a403ed53d43d632bbb69203b092f2eacdc07ea15e50c" exitCode=0 Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.228118 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" event={"ID":"9a8833f9-3cd6-4100-acae-847c61f5a6ed","Type":"ContainerDied","Data":"9dec16ffe5fd4cfc4d50a403ed53d43d632bbb69203b092f2eacdc07ea15e50c"} Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.228142 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" event={"ID":"9a8833f9-3cd6-4100-acae-847c61f5a6ed","Type":"ContainerDied","Data":"f937827ef444ffb4ca27d07fa2a28206f5f51e150f1bc8db5b2f43fbdcd7740c"} Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.228160 4682 scope.go:117] "RemoveContainer" containerID="9dec16ffe5fd4cfc4d50a403ed53d43d632bbb69203b092f2eacdc07ea15e50c" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.228261 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-wkcwn" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.233155 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"6073de7e-e347-4fb0-b607-21aaf92384b1","Type":"ContainerStarted","Data":"3bf8d710cdf53f2576776acf08393c89928ae316b5715f4f350a042eb372bd3a"} Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.258952 4682 scope.go:117] "RemoveContainer" containerID="0549a918de683bb946855b5d65707f94babae070564d4cfe2cc379e204f0e58a" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.271696 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-dns-swift-storage-0\") pod \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.272025 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gbhdd\" (UniqueName: \"kubernetes.io/projected/9a8833f9-3cd6-4100-acae-847c61f5a6ed-kube-api-access-gbhdd\") pod \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.272080 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-ovsdbserver-sb\") pod \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.272178 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-config\") pod \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.272216 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-dns-svc\") pod \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\" (UID: 
\"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.272341 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-ovsdbserver-nb\") pod \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\" (UID: \"9a8833f9-3cd6-4100-acae-847c61f5a6ed\") " Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.300337 4682 scope.go:117] "RemoveContainer" containerID="9dec16ffe5fd4cfc4d50a403ed53d43d632bbb69203b092f2eacdc07ea15e50c" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.300818 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a8833f9-3cd6-4100-acae-847c61f5a6ed-kube-api-access-gbhdd" (OuterVolumeSpecName: "kube-api-access-gbhdd") pod "9a8833f9-3cd6-4100-acae-847c61f5a6ed" (UID: "9a8833f9-3cd6-4100-acae-847c61f5a6ed"). InnerVolumeSpecName "kube-api-access-gbhdd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:16 crc kubenswrapper[4682]: E1210 11:08:16.301783 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9dec16ffe5fd4cfc4d50a403ed53d43d632bbb69203b092f2eacdc07ea15e50c\": container with ID starting with 9dec16ffe5fd4cfc4d50a403ed53d43d632bbb69203b092f2eacdc07ea15e50c not found: ID does not exist" containerID="9dec16ffe5fd4cfc4d50a403ed53d43d632bbb69203b092f2eacdc07ea15e50c" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.301815 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9dec16ffe5fd4cfc4d50a403ed53d43d632bbb69203b092f2eacdc07ea15e50c"} err="failed to get container status \"9dec16ffe5fd4cfc4d50a403ed53d43d632bbb69203b092f2eacdc07ea15e50c\": rpc error: code = NotFound desc = could not find container \"9dec16ffe5fd4cfc4d50a403ed53d43d632bbb69203b092f2eacdc07ea15e50c\": container with ID starting with 9dec16ffe5fd4cfc4d50a403ed53d43d632bbb69203b092f2eacdc07ea15e50c not found: ID does not exist" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.301839 4682 scope.go:117] "RemoveContainer" containerID="0549a918de683bb946855b5d65707f94babae070564d4cfe2cc379e204f0e58a" Dec 10 11:08:16 crc kubenswrapper[4682]: E1210 11:08:16.310795 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0549a918de683bb946855b5d65707f94babae070564d4cfe2cc379e204f0e58a\": container with ID starting with 0549a918de683bb946855b5d65707f94babae070564d4cfe2cc379e204f0e58a not found: ID does not exist" containerID="0549a918de683bb946855b5d65707f94babae070564d4cfe2cc379e204f0e58a" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.310838 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0549a918de683bb946855b5d65707f94babae070564d4cfe2cc379e204f0e58a"} err="failed to get container status \"0549a918de683bb946855b5d65707f94babae070564d4cfe2cc379e204f0e58a\": rpc error: code = NotFound desc = could not find container \"0549a918de683bb946855b5d65707f94babae070564d4cfe2cc379e204f0e58a\": container with ID starting with 0549a918de683bb946855b5d65707f94babae070564d4cfe2cc379e204f0e58a not found: ID does not exist" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.346869 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-ovsdbserver-nb" 
(OuterVolumeSpecName: "ovsdbserver-nb") pod "9a8833f9-3cd6-4100-acae-847c61f5a6ed" (UID: "9a8833f9-3cd6-4100-acae-847c61f5a6ed"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.361814 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-config" (OuterVolumeSpecName: "config") pod "9a8833f9-3cd6-4100-acae-847c61f5a6ed" (UID: "9a8833f9-3cd6-4100-acae-847c61f5a6ed"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.362854 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9a8833f9-3cd6-4100-acae-847c61f5a6ed" (UID: "9a8833f9-3cd6-4100-acae-847c61f5a6ed"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.370987 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9a8833f9-3cd6-4100-acae-847c61f5a6ed" (UID: "9a8833f9-3cd6-4100-acae-847c61f5a6ed"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.375425 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbhdd\" (UniqueName: \"kubernetes.io/projected/9a8833f9-3cd6-4100-acae-847c61f5a6ed-kube-api-access-gbhdd\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.375457 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.375477 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.375487 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.375498 4682 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.407396 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9a8833f9-3cd6-4100-acae-847c61f5a6ed" (UID: "9a8833f9-3cd6-4100-acae-847c61f5a6ed"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.476991 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a8833f9-3cd6-4100-acae-847c61f5a6ed-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.574310 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-wkcwn"] Dec 10 11:08:16 crc kubenswrapper[4682]: I1210 11:08:16.583442 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-wkcwn"] Dec 10 11:08:17 crc kubenswrapper[4682]: I1210 11:08:17.847390 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.007598 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-config-data\") pod \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.007763 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-logs\") pod \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.007818 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-scripts\") pod \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.007852 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-config-data-custom\") pod \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.007894 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-combined-ca-bundle\") pod \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.007952 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-etc-machine-id\") pod \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.008018 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6n8k6\" (UniqueName: \"kubernetes.io/projected/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-kube-api-access-6n8k6\") pod \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\" (UID: \"ed05abe1-c54a-4c41-9478-d5a9a0ea076c\") " Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.008744 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-logs" (OuterVolumeSpecName: "logs") pod 
"ed05abe1-c54a-4c41-9478-d5a9a0ea076c" (UID: "ed05abe1-c54a-4c41-9478-d5a9a0ea076c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.008788 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ed05abe1-c54a-4c41-9478-d5a9a0ea076c" (UID: "ed05abe1-c54a-4c41-9478-d5a9a0ea076c"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.012958 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-scripts" (OuterVolumeSpecName: "scripts") pod "ed05abe1-c54a-4c41-9478-d5a9a0ea076c" (UID: "ed05abe1-c54a-4c41-9478-d5a9a0ea076c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.013587 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ed05abe1-c54a-4c41-9478-d5a9a0ea076c" (UID: "ed05abe1-c54a-4c41-9478-d5a9a0ea076c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.014428 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-kube-api-access-6n8k6" (OuterVolumeSpecName: "kube-api-access-6n8k6") pod "ed05abe1-c54a-4c41-9478-d5a9a0ea076c" (UID: "ed05abe1-c54a-4c41-9478-d5a9a0ea076c"). InnerVolumeSpecName "kube-api-access-6n8k6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.064950 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed05abe1-c54a-4c41-9478-d5a9a0ea076c" (UID: "ed05abe1-c54a-4c41-9478-d5a9a0ea076c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.107616 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-config-data" (OuterVolumeSpecName: "config-data") pod "ed05abe1-c54a-4c41-9478-d5a9a0ea076c" (UID: "ed05abe1-c54a-4c41-9478-d5a9a0ea076c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.128603 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.128630 4682 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.128646 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.128654 4682 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.128664 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6n8k6\" (UniqueName: \"kubernetes.io/projected/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-kube-api-access-6n8k6\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.128673 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.128684 4682 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed05abe1-c54a-4c41-9478-d5a9a0ea076c-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.264604 4682 generic.go:334] "Generic (PLEG): container finished" podID="ed05abe1-c54a-4c41-9478-d5a9a0ea076c" containerID="55ae6882e6c0b575f25c706b700d328c85797cbc238369a0b1f7c0a03a863726" exitCode=137 Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.265007 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ed05abe1-c54a-4c41-9478-d5a9a0ea076c","Type":"ContainerDied","Data":"55ae6882e6c0b575f25c706b700d328c85797cbc238369a0b1f7c0a03a863726"} Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.265038 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ed05abe1-c54a-4c41-9478-d5a9a0ea076c","Type":"ContainerDied","Data":"dd87a27dfb8f1e0ffa68c15c89167130502f92d605a6daf74fc6ae2acf988608"} Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.265056 4682 scope.go:117] "RemoveContainer" containerID="55ae6882e6c0b575f25c706b700d328c85797cbc238369a0b1f7c0a03a863726" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.265210 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.322280 4682 scope.go:117] "RemoveContainer" containerID="2cf466dbef88d4c168c35762a827fba58f52d7027ca7b6417bfdeb873bffb847" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.322405 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.330914 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.349166 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:08:18 crc kubenswrapper[4682]: E1210 11:08:18.349846 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed05abe1-c54a-4c41-9478-d5a9a0ea076c" containerName="cinder-api-log" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.350047 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed05abe1-c54a-4c41-9478-d5a9a0ea076c" containerName="cinder-api-log" Dec 10 11:08:18 crc kubenswrapper[4682]: E1210 11:08:18.350140 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a8833f9-3cd6-4100-acae-847c61f5a6ed" containerName="init" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.350209 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a8833f9-3cd6-4100-acae-847c61f5a6ed" containerName="init" Dec 10 11:08:18 crc kubenswrapper[4682]: E1210 11:08:18.350290 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed05abe1-c54a-4c41-9478-d5a9a0ea076c" containerName="cinder-api" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.350352 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed05abe1-c54a-4c41-9478-d5a9a0ea076c" containerName="cinder-api" Dec 10 11:08:18 crc kubenswrapper[4682]: E1210 11:08:18.350417 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a8833f9-3cd6-4100-acae-847c61f5a6ed" containerName="dnsmasq-dns" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.350563 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a8833f9-3cd6-4100-acae-847c61f5a6ed" containerName="dnsmasq-dns" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.350809 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed05abe1-c54a-4c41-9478-d5a9a0ea076c" containerName="cinder-api" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.350878 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed05abe1-c54a-4c41-9478-d5a9a0ea076c" containerName="cinder-api-log" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.350935 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a8833f9-3cd6-4100-acae-847c61f5a6ed" containerName="dnsmasq-dns" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.352047 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.357696 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.357736 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.357885 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.365126 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.380688 4682 scope.go:117] "RemoveContainer" containerID="55ae6882e6c0b575f25c706b700d328c85797cbc238369a0b1f7c0a03a863726" Dec 10 11:08:18 crc kubenswrapper[4682]: E1210 11:08:18.394083 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55ae6882e6c0b575f25c706b700d328c85797cbc238369a0b1f7c0a03a863726\": container with ID starting with 55ae6882e6c0b575f25c706b700d328c85797cbc238369a0b1f7c0a03a863726 not found: ID does not exist" containerID="55ae6882e6c0b575f25c706b700d328c85797cbc238369a0b1f7c0a03a863726" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.394128 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55ae6882e6c0b575f25c706b700d328c85797cbc238369a0b1f7c0a03a863726"} err="failed to get container status \"55ae6882e6c0b575f25c706b700d328c85797cbc238369a0b1f7c0a03a863726\": rpc error: code = NotFound desc = could not find container \"55ae6882e6c0b575f25c706b700d328c85797cbc238369a0b1f7c0a03a863726\": container with ID starting with 55ae6882e6c0b575f25c706b700d328c85797cbc238369a0b1f7c0a03a863726 not found: ID does not exist" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.394159 4682 scope.go:117] "RemoveContainer" containerID="2cf466dbef88d4c168c35762a827fba58f52d7027ca7b6417bfdeb873bffb847" Dec 10 11:08:18 crc kubenswrapper[4682]: E1210 11:08:18.397618 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2cf466dbef88d4c168c35762a827fba58f52d7027ca7b6417bfdeb873bffb847\": container with ID starting with 2cf466dbef88d4c168c35762a827fba58f52d7027ca7b6417bfdeb873bffb847 not found: ID does not exist" containerID="2cf466dbef88d4c168c35762a827fba58f52d7027ca7b6417bfdeb873bffb847" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.397721 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2cf466dbef88d4c168c35762a827fba58f52d7027ca7b6417bfdeb873bffb847"} err="failed to get container status \"2cf466dbef88d4c168c35762a827fba58f52d7027ca7b6417bfdeb873bffb847\": rpc error: code = NotFound desc = could not find container \"2cf466dbef88d4c168c35762a827fba58f52d7027ca7b6417bfdeb873bffb847\": container with ID starting with 2cf466dbef88d4c168c35762a827fba58f52d7027ca7b6417bfdeb873bffb847 not found: ID does not exist" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.402310 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a8833f9-3cd6-4100-acae-847c61f5a6ed" path="/var/lib/kubelet/pods/9a8833f9-3cd6-4100-acae-847c61f5a6ed/volumes" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.403787 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes 
dir" podUID="ed05abe1-c54a-4c41-9478-d5a9a0ea076c" path="/var/lib/kubelet/pods/ed05abe1-c54a-4c41-9478-d5a9a0ea076c/volumes" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.435567 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-config-data\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.435631 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/393918ae-0996-472c-9f98-1862109d9f54-etc-machine-id\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.435659 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-public-tls-certs\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.435688 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.435756 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-scripts\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.435833 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/393918ae-0996-472c-9f98-1862109d9f54-logs\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.435864 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.435910 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksd7l\" (UniqueName: \"kubernetes.io/projected/393918ae-0996-472c-9f98-1862109d9f54-kube-api-access-ksd7l\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.435930 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-config-data-custom\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 
11:08:18.537959 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/393918ae-0996-472c-9f98-1862109d9f54-logs\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.538012 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.538038 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksd7l\" (UniqueName: \"kubernetes.io/projected/393918ae-0996-472c-9f98-1862109d9f54-kube-api-access-ksd7l\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.538063 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-config-data-custom\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.538157 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-config-data\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.538196 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/393918ae-0996-472c-9f98-1862109d9f54-etc-machine-id\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.538218 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-public-tls-certs\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.538242 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.538277 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-scripts\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.540446 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/393918ae-0996-472c-9f98-1862109d9f54-etc-machine-id\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 
11:08:18.542986 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/393918ae-0996-472c-9f98-1862109d9f54-logs\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.545076 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.545098 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-public-tls-certs\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.546940 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-scripts\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.549054 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-config-data-custom\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.549056 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.550001 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/393918ae-0996-472c-9f98-1862109d9f54-config-data\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.562536 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksd7l\" (UniqueName: \"kubernetes.io/projected/393918ae-0996-472c-9f98-1862109d9f54-kube-api-access-ksd7l\") pod \"cinder-api-0\" (UID: \"393918ae-0996-472c-9f98-1862109d9f54\") " pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.696181 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.880779 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-7475fff587-94bkc"] Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.883584 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.889560 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.889804 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.890318 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.910431 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7475fff587-94bkc"] Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.947636 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/613d10c2-81be-4ff7-8f40-528d35c931e0-public-tls-certs\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.947695 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/613d10c2-81be-4ff7-8f40-528d35c931e0-etc-swift\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.947726 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/613d10c2-81be-4ff7-8f40-528d35c931e0-combined-ca-bundle\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.947821 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/613d10c2-81be-4ff7-8f40-528d35c931e0-log-httpd\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.947870 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/613d10c2-81be-4ff7-8f40-528d35c931e0-config-data\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.947896 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/613d10c2-81be-4ff7-8f40-528d35c931e0-internal-tls-certs\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.947912 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/613d10c2-81be-4ff7-8f40-528d35c931e0-run-httpd\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " 
pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:18 crc kubenswrapper[4682]: I1210 11:08:18.947927 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvx9q\" (UniqueName: \"kubernetes.io/projected/613d10c2-81be-4ff7-8f40-528d35c931e0-kube-api-access-fvx9q\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.049767 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/613d10c2-81be-4ff7-8f40-528d35c931e0-config-data\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.049831 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/613d10c2-81be-4ff7-8f40-528d35c931e0-internal-tls-certs\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.049858 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/613d10c2-81be-4ff7-8f40-528d35c931e0-run-httpd\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.049880 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvx9q\" (UniqueName: \"kubernetes.io/projected/613d10c2-81be-4ff7-8f40-528d35c931e0-kube-api-access-fvx9q\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.049933 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/613d10c2-81be-4ff7-8f40-528d35c931e0-public-tls-certs\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.049964 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/613d10c2-81be-4ff7-8f40-528d35c931e0-etc-swift\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.050003 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/613d10c2-81be-4ff7-8f40-528d35c931e0-combined-ca-bundle\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.050143 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/613d10c2-81be-4ff7-8f40-528d35c931e0-log-httpd\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " 
pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.050604 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/613d10c2-81be-4ff7-8f40-528d35c931e0-log-httpd\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.050885 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/613d10c2-81be-4ff7-8f40-528d35c931e0-run-httpd\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.057410 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/613d10c2-81be-4ff7-8f40-528d35c931e0-combined-ca-bundle\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.058625 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/613d10c2-81be-4ff7-8f40-528d35c931e0-internal-tls-certs\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.060109 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/613d10c2-81be-4ff7-8f40-528d35c931e0-public-tls-certs\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.065571 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/613d10c2-81be-4ff7-8f40-528d35c931e0-config-data\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.066954 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/613d10c2-81be-4ff7-8f40-528d35c931e0-etc-swift\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.073387 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvx9q\" (UniqueName: \"kubernetes.io/projected/613d10c2-81be-4ff7-8f40-528d35c931e0-kube-api-access-fvx9q\") pod \"swift-proxy-7475fff587-94bkc\" (UID: \"613d10c2-81be-4ff7-8f40-528d35c931e0\") " pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.212807 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:19 crc kubenswrapper[4682]: W1210 11:08:19.262765 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod393918ae_0996_472c_9f98_1862109d9f54.slice/crio-2814cb7d3632bac19f239875d0e4a49530b43ceb1fd508885b3b78101127d876 WatchSource:0}: Error finding container 2814cb7d3632bac19f239875d0e4a49530b43ceb1fd508885b3b78101127d876: Status 404 returned error can't find the container with id 2814cb7d3632bac19f239875d0e4a49530b43ceb1fd508885b3b78101127d876 Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.266981 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.280263 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"393918ae-0996-472c-9f98-1862109d9f54","Type":"ContainerStarted","Data":"2814cb7d3632bac19f239875d0e4a49530b43ceb1fd508885b3b78101127d876"} Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.837170 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7475fff587-94bkc"] Dec 10 11:08:19 crc kubenswrapper[4682]: W1210 11:08:19.851707 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod613d10c2_81be_4ff7_8f40_528d35c931e0.slice/crio-12b93474b62c9c7560696c9a4e292f29f91abb189c55e4ee38a76b8581af04f3 WatchSource:0}: Error finding container 12b93474b62c9c7560696c9a4e292f29f91abb189c55e4ee38a76b8581af04f3: Status 404 returned error can't find the container with id 12b93474b62c9c7560696c9a4e292f29f91abb189c55e4ee38a76b8581af04f3 Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.982502 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.982839 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="ceilometer-central-agent" containerID="cri-o://744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69" gracePeriod=30 Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.983653 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="ceilometer-notification-agent" containerID="cri-o://9aa6f100770714cb3234ae09f213e08b77a4809ebf4e3681e69ffd0f26a7bdae" gracePeriod=30 Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.983631 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="sg-core" containerID="cri-o://2ba55ec7dd08cde9cc72578c20b49b3ae6932b7f2b97148421fd651710efdd98" gracePeriod=30 Dec 10 11:08:19 crc kubenswrapper[4682]: I1210 11:08:19.983858 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="proxy-httpd" containerID="cri-o://5b25436f00e4762c3f4f0128d86288673352db85a3a7ded260af6cac16b47c15" gracePeriod=30 Dec 10 11:08:20 crc kubenswrapper[4682]: I1210 11:08:20.318791 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"393918ae-0996-472c-9f98-1862109d9f54","Type":"ContainerStarted","Data":"6ad4f659f13deaa4a8de25e870e0fafa68b7fcd913daa1e8fd52d68dcac9392f"} Dec 10 11:08:20 crc kubenswrapper[4682]: I1210 11:08:20.323795 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7475fff587-94bkc" event={"ID":"613d10c2-81be-4ff7-8f40-528d35c931e0","Type":"ContainerStarted","Data":"db4b63128f6b5fcea6ddc7133c5f056f748b291270f7c7840d15b1abb4005657"} Dec 10 11:08:20 crc kubenswrapper[4682]: I1210 11:08:20.323862 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7475fff587-94bkc" event={"ID":"613d10c2-81be-4ff7-8f40-528d35c931e0","Type":"ContainerStarted","Data":"12b93474b62c9c7560696c9a4e292f29f91abb189c55e4ee38a76b8581af04f3"} Dec 10 11:08:20 crc kubenswrapper[4682]: I1210 11:08:20.347120 4682 generic.go:334] "Generic (PLEG): container finished" podID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerID="5b25436f00e4762c3f4f0128d86288673352db85a3a7ded260af6cac16b47c15" exitCode=0 Dec 10 11:08:20 crc kubenswrapper[4682]: I1210 11:08:20.347156 4682 generic.go:334] "Generic (PLEG): container finished" podID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerID="2ba55ec7dd08cde9cc72578c20b49b3ae6932b7f2b97148421fd651710efdd98" exitCode=2 Dec 10 11:08:20 crc kubenswrapper[4682]: I1210 11:08:20.347178 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54a0138b-bd8c-4f9c-8858-7c8b41798e5e","Type":"ContainerDied","Data":"5b25436f00e4762c3f4f0128d86288673352db85a3a7ded260af6cac16b47c15"} Dec 10 11:08:20 crc kubenswrapper[4682]: I1210 11:08:20.347203 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54a0138b-bd8c-4f9c-8858-7c8b41798e5e","Type":"ContainerDied","Data":"2ba55ec7dd08cde9cc72578c20b49b3ae6932b7f2b97148421fd651710efdd98"} Dec 10 11:08:21 crc kubenswrapper[4682]: I1210 11:08:21.387975 4682 generic.go:334] "Generic (PLEG): container finished" podID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerID="744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69" exitCode=0 Dec 10 11:08:21 crc kubenswrapper[4682]: I1210 11:08:21.388590 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54a0138b-bd8c-4f9c-8858-7c8b41798e5e","Type":"ContainerDied","Data":"744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69"} Dec 10 11:08:21 crc kubenswrapper[4682]: I1210 11:08:21.399845 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"393918ae-0996-472c-9f98-1862109d9f54","Type":"ContainerStarted","Data":"95d7f5b4591e040bb4f76cf471d7407d8557d4ae27876a2b1713aa2a2a1787dc"} Dec 10 11:08:21 crc kubenswrapper[4682]: I1210 11:08:21.401010 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 10 11:08:21 crc kubenswrapper[4682]: I1210 11:08:21.407642 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7475fff587-94bkc" event={"ID":"613d10c2-81be-4ff7-8f40-528d35c931e0","Type":"ContainerStarted","Data":"532be4437e52ac29fbcdc713f237e269512e1232bc591590df951116b93940c0"} Dec 10 11:08:21 crc kubenswrapper[4682]: I1210 11:08:21.407860 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:21 crc kubenswrapper[4682]: I1210 11:08:21.407899 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:21 crc kubenswrapper[4682]: I1210 11:08:21.435159 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.435140323 podStartE2EDuration="3.435140323s" podCreationTimestamp="2025-12-10 11:08:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:08:21.42202214 +0000 UTC m=+1381.742232890" watchObservedRunningTime="2025-12-10 11:08:21.435140323 +0000 UTC m=+1381.755351073" Dec 10 11:08:21 crc kubenswrapper[4682]: I1210 11:08:21.458981 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-7475fff587-94bkc" podStartSLOduration=3.458963305 podStartE2EDuration="3.458963305s" podCreationTimestamp="2025-12-10 11:08:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:08:21.449421331 +0000 UTC m=+1381.769632091" watchObservedRunningTime="2025-12-10 11:08:21.458963305 +0000 UTC m=+1381.779174055" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.126674 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.242192 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-log-httpd\") pod \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.242384 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-combined-ca-bundle\") pod \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.242443 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-scripts\") pod \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.243027 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "54a0138b-bd8c-4f9c-8858-7c8b41798e5e" (UID: "54a0138b-bd8c-4f9c-8858-7c8b41798e5e"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.243228 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-sg-core-conf-yaml\") pod \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.243341 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-run-httpd\") pod \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.243385 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-config-data\") pod \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.243422 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dvb25\" (UniqueName: \"kubernetes.io/projected/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-kube-api-access-dvb25\") pod \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\" (UID: \"54a0138b-bd8c-4f9c-8858-7c8b41798e5e\") " Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.243769 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "54a0138b-bd8c-4f9c-8858-7c8b41798e5e" (UID: "54a0138b-bd8c-4f9c-8858-7c8b41798e5e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.244030 4682 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.244049 4682 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.265019 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-kube-api-access-dvb25" (OuterVolumeSpecName: "kube-api-access-dvb25") pod "54a0138b-bd8c-4f9c-8858-7c8b41798e5e" (UID: "54a0138b-bd8c-4f9c-8858-7c8b41798e5e"). InnerVolumeSpecName "kube-api-access-dvb25". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.274847 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-scripts" (OuterVolumeSpecName: "scripts") pod "54a0138b-bd8c-4f9c-8858-7c8b41798e5e" (UID: "54a0138b-bd8c-4f9c-8858-7c8b41798e5e"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.333789 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "54a0138b-bd8c-4f9c-8858-7c8b41798e5e" (UID: "54a0138b-bd8c-4f9c-8858-7c8b41798e5e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.347875 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.347974 4682 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.347986 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dvb25\" (UniqueName: \"kubernetes.io/projected/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-kube-api-access-dvb25\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.443628 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "54a0138b-bd8c-4f9c-8858-7c8b41798e5e" (UID: "54a0138b-bd8c-4f9c-8858-7c8b41798e5e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.452452 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.471167 4682 generic.go:334] "Generic (PLEG): container finished" podID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerID="9aa6f100770714cb3234ae09f213e08b77a4809ebf4e3681e69ffd0f26a7bdae" exitCode=0 Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.472810 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.473895 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54a0138b-bd8c-4f9c-8858-7c8b41798e5e","Type":"ContainerDied","Data":"9aa6f100770714cb3234ae09f213e08b77a4809ebf4e3681e69ffd0f26a7bdae"} Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.474008 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"54a0138b-bd8c-4f9c-8858-7c8b41798e5e","Type":"ContainerDied","Data":"f03059924a6ae4f29e669101061d0208b57b8471858d68c82cfe28daae818092"} Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.474041 4682 scope.go:117] "RemoveContainer" containerID="5b25436f00e4762c3f4f0128d86288673352db85a3a7ded260af6cac16b47c15" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.497980 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-config-data" (OuterVolumeSpecName: "config-data") pod "54a0138b-bd8c-4f9c-8858-7c8b41798e5e" (UID: "54a0138b-bd8c-4f9c-8858-7c8b41798e5e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.511462 4682 scope.go:117] "RemoveContainer" containerID="2ba55ec7dd08cde9cc72578c20b49b3ae6932b7f2b97148421fd651710efdd98" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.559093 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54a0138b-bd8c-4f9c-8858-7c8b41798e5e-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.561710 4682 scope.go:117] "RemoveContainer" containerID="9aa6f100770714cb3234ae09f213e08b77a4809ebf4e3681e69ffd0f26a7bdae" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.593913 4682 scope.go:117] "RemoveContainer" containerID="744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.685826 4682 scope.go:117] "RemoveContainer" containerID="5b25436f00e4762c3f4f0128d86288673352db85a3a7ded260af6cac16b47c15" Dec 10 11:08:22 crc kubenswrapper[4682]: E1210 11:08:22.686560 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b25436f00e4762c3f4f0128d86288673352db85a3a7ded260af6cac16b47c15\": container with ID starting with 5b25436f00e4762c3f4f0128d86288673352db85a3a7ded260af6cac16b47c15 not found: ID does not exist" containerID="5b25436f00e4762c3f4f0128d86288673352db85a3a7ded260af6cac16b47c15" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.686601 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b25436f00e4762c3f4f0128d86288673352db85a3a7ded260af6cac16b47c15"} err="failed to get container status \"5b25436f00e4762c3f4f0128d86288673352db85a3a7ded260af6cac16b47c15\": rpc error: code = NotFound desc = could not find container \"5b25436f00e4762c3f4f0128d86288673352db85a3a7ded260af6cac16b47c15\": container with ID starting with 5b25436f00e4762c3f4f0128d86288673352db85a3a7ded260af6cac16b47c15 not found: ID does not exist" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.686629 4682 scope.go:117] "RemoveContainer" containerID="2ba55ec7dd08cde9cc72578c20b49b3ae6932b7f2b97148421fd651710efdd98" Dec 10 11:08:22 crc kubenswrapper[4682]: E1210 11:08:22.687854 4682 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ba55ec7dd08cde9cc72578c20b49b3ae6932b7f2b97148421fd651710efdd98\": container with ID starting with 2ba55ec7dd08cde9cc72578c20b49b3ae6932b7f2b97148421fd651710efdd98 not found: ID does not exist" containerID="2ba55ec7dd08cde9cc72578c20b49b3ae6932b7f2b97148421fd651710efdd98" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.687881 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ba55ec7dd08cde9cc72578c20b49b3ae6932b7f2b97148421fd651710efdd98"} err="failed to get container status \"2ba55ec7dd08cde9cc72578c20b49b3ae6932b7f2b97148421fd651710efdd98\": rpc error: code = NotFound desc = could not find container \"2ba55ec7dd08cde9cc72578c20b49b3ae6932b7f2b97148421fd651710efdd98\": container with ID starting with 2ba55ec7dd08cde9cc72578c20b49b3ae6932b7f2b97148421fd651710efdd98 not found: ID does not exist" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.688042 4682 scope.go:117] "RemoveContainer" containerID="9aa6f100770714cb3234ae09f213e08b77a4809ebf4e3681e69ffd0f26a7bdae" Dec 10 11:08:22 crc kubenswrapper[4682]: E1210 11:08:22.688501 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9aa6f100770714cb3234ae09f213e08b77a4809ebf4e3681e69ffd0f26a7bdae\": container with ID starting with 9aa6f100770714cb3234ae09f213e08b77a4809ebf4e3681e69ffd0f26a7bdae not found: ID does not exist" containerID="9aa6f100770714cb3234ae09f213e08b77a4809ebf4e3681e69ffd0f26a7bdae" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.688543 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9aa6f100770714cb3234ae09f213e08b77a4809ebf4e3681e69ffd0f26a7bdae"} err="failed to get container status \"9aa6f100770714cb3234ae09f213e08b77a4809ebf4e3681e69ffd0f26a7bdae\": rpc error: code = NotFound desc = could not find container \"9aa6f100770714cb3234ae09f213e08b77a4809ebf4e3681e69ffd0f26a7bdae\": container with ID starting with 9aa6f100770714cb3234ae09f213e08b77a4809ebf4e3681e69ffd0f26a7bdae not found: ID does not exist" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.688565 4682 scope.go:117] "RemoveContainer" containerID="744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69" Dec 10 11:08:22 crc kubenswrapper[4682]: E1210 11:08:22.689573 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69\": container with ID starting with 744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69 not found: ID does not exist" containerID="744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.689603 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69"} err="failed to get container status \"744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69\": rpc error: code = NotFound desc = could not find container \"744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69\": container with ID starting with 744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69 not found: ID does not exist" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.807576 4682 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.826187 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.845006 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:22 crc kubenswrapper[4682]: E1210 11:08:22.845516 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="ceilometer-notification-agent" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.845533 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="ceilometer-notification-agent" Dec 10 11:08:22 crc kubenswrapper[4682]: E1210 11:08:22.845560 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="proxy-httpd" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.845567 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="proxy-httpd" Dec 10 11:08:22 crc kubenswrapper[4682]: E1210 11:08:22.845589 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="sg-core" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.845596 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="sg-core" Dec 10 11:08:22 crc kubenswrapper[4682]: E1210 11:08:22.845625 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="ceilometer-central-agent" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.845631 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="ceilometer-central-agent" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.845819 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="ceilometer-central-agent" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.845828 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="proxy-httpd" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.845843 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="sg-core" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.845856 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" containerName="ceilometer-notification-agent" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.847816 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.853284 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.853517 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.859129 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.967610 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-config-data\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.967924 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9t499\" (UniqueName: \"kubernetes.io/projected/c1aa28b0-107b-411c-a7c1-9646565c49a9-kube-api-access-9t499\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.967949 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.967994 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1aa28b0-107b-411c-a7c1-9646565c49a9-log-httpd\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.968055 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-scripts\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.968084 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1aa28b0-107b-411c-a7c1-9646565c49a9-run-httpd\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:22 crc kubenswrapper[4682]: I1210 11:08:22.968371 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 11:08:23.070687 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 
11:08:23.070741 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-config-data\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 11:08:23.070808 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9t499\" (UniqueName: \"kubernetes.io/projected/c1aa28b0-107b-411c-a7c1-9646565c49a9-kube-api-access-9t499\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 11:08:23.070832 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 11:08:23.070880 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1aa28b0-107b-411c-a7c1-9646565c49a9-log-httpd\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 11:08:23.070952 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-scripts\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 11:08:23.070979 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1aa28b0-107b-411c-a7c1-9646565c49a9-run-httpd\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 11:08:23.071549 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1aa28b0-107b-411c-a7c1-9646565c49a9-log-httpd\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 11:08:23.072349 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1aa28b0-107b-411c-a7c1-9646565c49a9-run-httpd\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 11:08:23.074679 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-scripts\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 11:08:23.076067 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 11:08:23.076352 4682 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-config-data\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 11:08:23.079000 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 11:08:23.087328 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9t499\" (UniqueName: \"kubernetes.io/projected/c1aa28b0-107b-411c-a7c1-9646565c49a9-kube-api-access-9t499\") pod \"ceilometer-0\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: I1210 11:08:23.174182 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:08:23 crc kubenswrapper[4682]: E1210 11:08:23.438452 4682 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54a0138b_bd8c_4f9c_8858_7c8b41798e5e.slice/crio-744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54a0138b_bd8c_4f9c_8858_7c8b41798e5e.slice/crio-conmon-744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:08:24 crc kubenswrapper[4682]: I1210 11:08:24.395809 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54a0138b-bd8c-4f9c-8858-7c8b41798e5e" path="/var/lib/kubelet/pods/54a0138b-bd8c-4f9c-8858-7c8b41798e5e/volumes" Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.761703 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-xkg9x"] Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.763936 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-xkg9x" Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.777686 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-xkg9x"] Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.829528 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3adb294d-5fbd-4f36-b324-8e99e2e22cee-operator-scripts\") pod \"nova-api-db-create-xkg9x\" (UID: \"3adb294d-5fbd-4f36-b324-8e99e2e22cee\") " pod="openstack/nova-api-db-create-xkg9x" Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.829711 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qv896\" (UniqueName: \"kubernetes.io/projected/3adb294d-5fbd-4f36-b324-8e99e2e22cee-kube-api-access-qv896\") pod \"nova-api-db-create-xkg9x\" (UID: \"3adb294d-5fbd-4f36-b324-8e99e2e22cee\") " pod="openstack/nova-api-db-create-xkg9x" Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.853514 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-dtzdg"] Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.854743 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-dtzdg" Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.864634 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-dtzdg"] Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.876962 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-f97b-account-create-update-97hvk"] Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.878592 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-f97b-account-create-update-97hvk" Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.884860 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.913664 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f97b-account-create-update-97hvk"] Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.933134 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3adb294d-5fbd-4f36-b324-8e99e2e22cee-operator-scripts\") pod \"nova-api-db-create-xkg9x\" (UID: \"3adb294d-5fbd-4f36-b324-8e99e2e22cee\") " pod="openstack/nova-api-db-create-xkg9x" Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.933347 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qv896\" (UniqueName: \"kubernetes.io/projected/3adb294d-5fbd-4f36-b324-8e99e2e22cee-kube-api-access-qv896\") pod \"nova-api-db-create-xkg9x\" (UID: \"3adb294d-5fbd-4f36-b324-8e99e2e22cee\") " pod="openstack/nova-api-db-create-xkg9x" Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.933393 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvcv5\" (UniqueName: \"kubernetes.io/projected/19e9c14e-416c-4f11-96ff-d2ccdac04cdf-kube-api-access-vvcv5\") pod \"nova-cell0-db-create-dtzdg\" (UID: \"19e9c14e-416c-4f11-96ff-d2ccdac04cdf\") " pod="openstack/nova-cell0-db-create-dtzdg" Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.933416 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19e9c14e-416c-4f11-96ff-d2ccdac04cdf-operator-scripts\") pod \"nova-cell0-db-create-dtzdg\" (UID: \"19e9c14e-416c-4f11-96ff-d2ccdac04cdf\") " pod="openstack/nova-cell0-db-create-dtzdg" Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.936260 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3adb294d-5fbd-4f36-b324-8e99e2e22cee-operator-scripts\") pod \"nova-api-db-create-xkg9x\" (UID: \"3adb294d-5fbd-4f36-b324-8e99e2e22cee\") " pod="openstack/nova-api-db-create-xkg9x" Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.971324 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qv896\" (UniqueName: \"kubernetes.io/projected/3adb294d-5fbd-4f36-b324-8e99e2e22cee-kube-api-access-qv896\") pod \"nova-api-db-create-xkg9x\" (UID: \"3adb294d-5fbd-4f36-b324-8e99e2e22cee\") " pod="openstack/nova-api-db-create-xkg9x" Dec 10 11:08:25 crc kubenswrapper[4682]: I1210 11:08:25.996695 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-dfvhs"] Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.019183 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-dfvhs" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.032363 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-dfvhs"] Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.038985 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5hn8\" (UniqueName: \"kubernetes.io/projected/695c9c33-02b4-4ba2-86d6-a6def1e67513-kube-api-access-f5hn8\") pod \"nova-api-f97b-account-create-update-97hvk\" (UID: \"695c9c33-02b4-4ba2-86d6-a6def1e67513\") " pod="openstack/nova-api-f97b-account-create-update-97hvk" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.044169 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvcv5\" (UniqueName: \"kubernetes.io/projected/19e9c14e-416c-4f11-96ff-d2ccdac04cdf-kube-api-access-vvcv5\") pod \"nova-cell0-db-create-dtzdg\" (UID: \"19e9c14e-416c-4f11-96ff-d2ccdac04cdf\") " pod="openstack/nova-cell0-db-create-dtzdg" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.044226 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19e9c14e-416c-4f11-96ff-d2ccdac04cdf-operator-scripts\") pod \"nova-cell0-db-create-dtzdg\" (UID: \"19e9c14e-416c-4f11-96ff-d2ccdac04cdf\") " pod="openstack/nova-cell0-db-create-dtzdg" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.044260 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/695c9c33-02b4-4ba2-86d6-a6def1e67513-operator-scripts\") pod \"nova-api-f97b-account-create-update-97hvk\" (UID: \"695c9c33-02b4-4ba2-86d6-a6def1e67513\") " pod="openstack/nova-api-f97b-account-create-update-97hvk" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.046258 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19e9c14e-416c-4f11-96ff-d2ccdac04cdf-operator-scripts\") pod \"nova-cell0-db-create-dtzdg\" (UID: \"19e9c14e-416c-4f11-96ff-d2ccdac04cdf\") " pod="openstack/nova-cell0-db-create-dtzdg" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.084636 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-1f8c-account-create-update-lr7tf"] Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.093076 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvcv5\" (UniqueName: \"kubernetes.io/projected/19e9c14e-416c-4f11-96ff-d2ccdac04cdf-kube-api-access-vvcv5\") pod \"nova-cell0-db-create-dtzdg\" (UID: \"19e9c14e-416c-4f11-96ff-d2ccdac04cdf\") " pod="openstack/nova-cell0-db-create-dtzdg" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.093683 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-xkg9x" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.108002 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-1f8c-account-create-update-lr7tf" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.111764 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-1f8c-account-create-update-lr7tf"] Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.121983 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.146581 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.146747 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8mdm\" (UniqueName: \"kubernetes.io/projected/636ce24d-c743-4ca9-b253-8c5da3d9f7c8-kube-api-access-v8mdm\") pod \"nova-cell1-db-create-dfvhs\" (UID: \"636ce24d-c743-4ca9-b253-8c5da3d9f7c8\") " pod="openstack/nova-cell1-db-create-dfvhs" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.147051 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/695c9c33-02b4-4ba2-86d6-a6def1e67513-operator-scripts\") pod \"nova-api-f97b-account-create-update-97hvk\" (UID: \"695c9c33-02b4-4ba2-86d6-a6def1e67513\") " pod="openstack/nova-api-f97b-account-create-update-97hvk" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.147114 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/636ce24d-c743-4ca9-b253-8c5da3d9f7c8-operator-scripts\") pod \"nova-cell1-db-create-dfvhs\" (UID: \"636ce24d-c743-4ca9-b253-8c5da3d9f7c8\") " pod="openstack/nova-cell1-db-create-dfvhs" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.147274 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5hn8\" (UniqueName: \"kubernetes.io/projected/695c9c33-02b4-4ba2-86d6-a6def1e67513-kube-api-access-f5hn8\") pod \"nova-api-f97b-account-create-update-97hvk\" (UID: \"695c9c33-02b4-4ba2-86d6-a6def1e67513\") " pod="openstack/nova-api-f97b-account-create-update-97hvk" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.148408 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/695c9c33-02b4-4ba2-86d6-a6def1e67513-operator-scripts\") pod \"nova-api-f97b-account-create-update-97hvk\" (UID: \"695c9c33-02b4-4ba2-86d6-a6def1e67513\") " pod="openstack/nova-api-f97b-account-create-update-97hvk" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.173924 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-dtzdg" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.175081 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5hn8\" (UniqueName: \"kubernetes.io/projected/695c9c33-02b4-4ba2-86d6-a6def1e67513-kube-api-access-f5hn8\") pod \"nova-api-f97b-account-create-update-97hvk\" (UID: \"695c9c33-02b4-4ba2-86d6-a6def1e67513\") " pod="openstack/nova-api-f97b-account-create-update-97hvk" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.207962 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-f97b-account-create-update-97hvk" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.249997 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f694p\" (UniqueName: \"kubernetes.io/projected/6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4-kube-api-access-f694p\") pod \"nova-cell0-1f8c-account-create-update-lr7tf\" (UID: \"6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4\") " pod="openstack/nova-cell0-1f8c-account-create-update-lr7tf" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.250105 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8mdm\" (UniqueName: \"kubernetes.io/projected/636ce24d-c743-4ca9-b253-8c5da3d9f7c8-kube-api-access-v8mdm\") pod \"nova-cell1-db-create-dfvhs\" (UID: \"636ce24d-c743-4ca9-b253-8c5da3d9f7c8\") " pod="openstack/nova-cell1-db-create-dfvhs" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.250163 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4-operator-scripts\") pod \"nova-cell0-1f8c-account-create-update-lr7tf\" (UID: \"6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4\") " pod="openstack/nova-cell0-1f8c-account-create-update-lr7tf" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.250237 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/636ce24d-c743-4ca9-b253-8c5da3d9f7c8-operator-scripts\") pod \"nova-cell1-db-create-dfvhs\" (UID: \"636ce24d-c743-4ca9-b253-8c5da3d9f7c8\") " pod="openstack/nova-cell1-db-create-dfvhs" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.251102 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/636ce24d-c743-4ca9-b253-8c5da3d9f7c8-operator-scripts\") pod \"nova-cell1-db-create-dfvhs\" (UID: \"636ce24d-c743-4ca9-b253-8c5da3d9f7c8\") " pod="openstack/nova-cell1-db-create-dfvhs" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.268725 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-7081-account-create-update-tgr2n"] Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.270843 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-7081-account-create-update-tgr2n" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.272978 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8mdm\" (UniqueName: \"kubernetes.io/projected/636ce24d-c743-4ca9-b253-8c5da3d9f7c8-kube-api-access-v8mdm\") pod \"nova-cell1-db-create-dfvhs\" (UID: \"636ce24d-c743-4ca9-b253-8c5da3d9f7c8\") " pod="openstack/nova-cell1-db-create-dfvhs" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.276090 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.305566 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-7081-account-create-update-tgr2n"] Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.354440 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4-operator-scripts\") pod \"nova-cell0-1f8c-account-create-update-lr7tf\" (UID: \"6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4\") " pod="openstack/nova-cell0-1f8c-account-create-update-lr7tf" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.354690 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msxlt\" (UniqueName: \"kubernetes.io/projected/e060569d-f156-4f2e-9796-e304b2d2be0d-kube-api-access-msxlt\") pod \"nova-cell1-7081-account-create-update-tgr2n\" (UID: \"e060569d-f156-4f2e-9796-e304b2d2be0d\") " pod="openstack/nova-cell1-7081-account-create-update-tgr2n" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.354726 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e060569d-f156-4f2e-9796-e304b2d2be0d-operator-scripts\") pod \"nova-cell1-7081-account-create-update-tgr2n\" (UID: \"e060569d-f156-4f2e-9796-e304b2d2be0d\") " pod="openstack/nova-cell1-7081-account-create-update-tgr2n" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.354830 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f694p\" (UniqueName: \"kubernetes.io/projected/6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4-kube-api-access-f694p\") pod \"nova-cell0-1f8c-account-create-update-lr7tf\" (UID: \"6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4\") " pod="openstack/nova-cell0-1f8c-account-create-update-lr7tf" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.356084 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4-operator-scripts\") pod \"nova-cell0-1f8c-account-create-update-lr7tf\" (UID: \"6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4\") " pod="openstack/nova-cell0-1f8c-account-create-update-lr7tf" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.361268 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-dfvhs" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.409974 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f694p\" (UniqueName: \"kubernetes.io/projected/6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4-kube-api-access-f694p\") pod \"nova-cell0-1f8c-account-create-update-lr7tf\" (UID: \"6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4\") " pod="openstack/nova-cell0-1f8c-account-create-update-lr7tf" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.457016 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msxlt\" (UniqueName: \"kubernetes.io/projected/e060569d-f156-4f2e-9796-e304b2d2be0d-kube-api-access-msxlt\") pod \"nova-cell1-7081-account-create-update-tgr2n\" (UID: \"e060569d-f156-4f2e-9796-e304b2d2be0d\") " pod="openstack/nova-cell1-7081-account-create-update-tgr2n" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.457068 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e060569d-f156-4f2e-9796-e304b2d2be0d-operator-scripts\") pod \"nova-cell1-7081-account-create-update-tgr2n\" (UID: \"e060569d-f156-4f2e-9796-e304b2d2be0d\") " pod="openstack/nova-cell1-7081-account-create-update-tgr2n" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.459194 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e060569d-f156-4f2e-9796-e304b2d2be0d-operator-scripts\") pod \"nova-cell1-7081-account-create-update-tgr2n\" (UID: \"e060569d-f156-4f2e-9796-e304b2d2be0d\") " pod="openstack/nova-cell1-7081-account-create-update-tgr2n" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.470047 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1f8c-account-create-update-lr7tf" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.498135 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msxlt\" (UniqueName: \"kubernetes.io/projected/e060569d-f156-4f2e-9796-e304b2d2be0d-kube-api-access-msxlt\") pod \"nova-cell1-7081-account-create-update-tgr2n\" (UID: \"e060569d-f156-4f2e-9796-e304b2d2be0d\") " pod="openstack/nova-cell1-7081-account-create-update-tgr2n" Dec 10 11:08:26 crc kubenswrapper[4682]: I1210 11:08:26.637775 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-7081-account-create-update-tgr2n" Dec 10 11:08:27 crc kubenswrapper[4682]: I1210 11:08:27.735817 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:08:27 crc kubenswrapper[4682]: I1210 11:08:27.736125 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a8f8fbad-ac9b-4103-8370-9693c234d655" containerName="glance-log" containerID="cri-o://841f903c864af41eb790a2eeb05e5894e8a041bc91027390add3e985943784a9" gracePeriod=30 Dec 10 11:08:27 crc kubenswrapper[4682]: I1210 11:08:27.736286 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a8f8fbad-ac9b-4103-8370-9693c234d655" containerName="glance-httpd" containerID="cri-o://c07a004f8ebf4a18c4b2cd5c2c652e998b5169cb8dd7a1d921bc1b8a41a069e7" gracePeriod=30 Dec 10 11:08:28 crc kubenswrapper[4682]: I1210 11:08:28.599120 4682 generic.go:334] "Generic (PLEG): container finished" podID="a8f8fbad-ac9b-4103-8370-9693c234d655" containerID="841f903c864af41eb790a2eeb05e5894e8a041bc91027390add3e985943784a9" exitCode=143 Dec 10 11:08:28 crc kubenswrapper[4682]: I1210 11:08:28.599327 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a8f8fbad-ac9b-4103-8370-9693c234d655","Type":"ContainerDied","Data":"841f903c864af41eb790a2eeb05e5894e8a041bc91027390add3e985943784a9"} Dec 10 11:08:28 crc kubenswrapper[4682]: I1210 11:08:28.883597 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:08:28 crc kubenswrapper[4682]: I1210 11:08:28.883899 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="72889b73-e773-4559-800e-a87032e35a05" containerName="glance-log" containerID="cri-o://bbb7dbed30b8d9ae6765192592f9188ce3c230c04639e698ed66bbb3655bdcaa" gracePeriod=30 Dec 10 11:08:28 crc kubenswrapper[4682]: I1210 11:08:28.883960 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="72889b73-e773-4559-800e-a87032e35a05" containerName="glance-httpd" containerID="cri-o://1f2f1769d31823f996453883380bcdc3a63510f9032ad956230c5077e26381df" gracePeriod=30 Dec 10 11:08:29 crc kubenswrapper[4682]: I1210 11:08:29.300816 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:29 crc kubenswrapper[4682]: I1210 11:08:29.308671 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7475fff587-94bkc" Dec 10 11:08:29 crc kubenswrapper[4682]: I1210 11:08:29.626708 4682 generic.go:334] "Generic (PLEG): container finished" podID="72889b73-e773-4559-800e-a87032e35a05" containerID="bbb7dbed30b8d9ae6765192592f9188ce3c230c04639e698ed66bbb3655bdcaa" exitCode=143 Dec 10 11:08:29 crc kubenswrapper[4682]: I1210 11:08:29.627549 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"72889b73-e773-4559-800e-a87032e35a05","Type":"ContainerDied","Data":"bbb7dbed30b8d9ae6765192592f9188ce3c230c04639e698ed66bbb3655bdcaa"} Dec 10 11:08:30 crc kubenswrapper[4682]: I1210 11:08:30.940566 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" 
podUID="a8f8fbad-ac9b-4103-8370-9693c234d655" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.168:9292/healthcheck\": read tcp 10.217.0.2:40310->10.217.0.168:9292: read: connection reset by peer" Dec 10 11:08:30 crc kubenswrapper[4682]: I1210 11:08:30.940566 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="a8f8fbad-ac9b-4103-8370-9693c234d655" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.168:9292/healthcheck\": read tcp 10.217.0.2:40326->10.217.0.168:9292: read: connection reset by peer" Dec 10 11:08:30 crc kubenswrapper[4682]: I1210 11:08:30.956721 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Dec 10 11:08:31 crc kubenswrapper[4682]: I1210 11:08:31.211182 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f97b-account-create-update-97hvk"] Dec 10 11:08:31 crc kubenswrapper[4682]: I1210 11:08:31.683693 4682 generic.go:334] "Generic (PLEG): container finished" podID="a8f8fbad-ac9b-4103-8370-9693c234d655" containerID="c07a004f8ebf4a18c4b2cd5c2c652e998b5169cb8dd7a1d921bc1b8a41a069e7" exitCode=0 Dec 10 11:08:31 crc kubenswrapper[4682]: I1210 11:08:31.684065 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a8f8fbad-ac9b-4103-8370-9693c234d655","Type":"ContainerDied","Data":"c07a004f8ebf4a18c4b2cd5c2c652e998b5169cb8dd7a1d921bc1b8a41a069e7"} Dec 10 11:08:31 crc kubenswrapper[4682]: I1210 11:08:31.710602 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f97b-account-create-update-97hvk" event={"ID":"695c9c33-02b4-4ba2-86d6-a6def1e67513","Type":"ContainerStarted","Data":"0ab79430a38612f960ad1c222034fc52c0932c97f823a877e805990681a644c5"} Dec 10 11:08:31 crc kubenswrapper[4682]: I1210 11:08:31.719033 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"6073de7e-e347-4fb0-b607-21aaf92384b1","Type":"ContainerStarted","Data":"6f1169efe2aecfad5a1e4b3d22d7c8e1d9a9b26251102d71970146fcf5cb410d"} Dec 10 11:08:31 crc kubenswrapper[4682]: I1210 11:08:31.720441 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-7081-account-create-update-tgr2n"] Dec 10 11:08:31 crc kubenswrapper[4682]: W1210 11:08:31.735717 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode060569d_f156_4f2e_9796_e304b2d2be0d.slice/crio-05adc622a9cf5387337e6f2d4a6dfa9881cae21384c8fc55c39baa389ac4a8fe WatchSource:0}: Error finding container 05adc622a9cf5387337e6f2d4a6dfa9881cae21384c8fc55c39baa389ac4a8fe: Status 404 returned error can't find the container with id 05adc622a9cf5387337e6f2d4a6dfa9881cae21384c8fc55c39baa389ac4a8fe Dec 10 11:08:31 crc kubenswrapper[4682]: I1210 11:08:31.743226 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-xkg9x"] Dec 10 11:08:31 crc kubenswrapper[4682]: I1210 11:08:31.773349 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-dfvhs"] Dec 10 11:08:31 crc kubenswrapper[4682]: I1210 11:08:31.818605 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-1f8c-account-create-update-lr7tf"] Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.084863 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-dtzdg"] Dec 
10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.118622 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.127038 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.299662137 podStartE2EDuration="18.127011962s" podCreationTimestamp="2025-12-10 11:08:14 +0000 UTC" firstStartedPulling="2025-12-10 11:08:15.475335513 +0000 UTC m=+1375.795546263" lastFinishedPulling="2025-12-10 11:08:30.302685338 +0000 UTC m=+1390.622896088" observedRunningTime="2025-12-10 11:08:31.761457593 +0000 UTC m=+1392.081668343" watchObservedRunningTime="2025-12-10 11:08:32.127011962 +0000 UTC m=+1392.447222712" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.129970 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="72889b73-e773-4559-800e-a87032e35a05" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.169:9292/healthcheck\": read tcp 10.217.0.2:37976->10.217.0.169:9292: read: connection reset by peer" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.130397 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="72889b73-e773-4559-800e-a87032e35a05" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.169:9292/healthcheck\": read tcp 10.217.0.2:37972->10.217.0.169:9292: read: connection reset by peer" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.368878 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.453966 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") pod \"a8f8fbad-ac9b-4103-8370-9693c234d655\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.454088 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a8f8fbad-ac9b-4103-8370-9693c234d655-httpd-run\") pod \"a8f8fbad-ac9b-4103-8370-9693c234d655\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.454184 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-public-tls-certs\") pod \"a8f8fbad-ac9b-4103-8370-9693c234d655\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.454249 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f8fbad-ac9b-4103-8370-9693c234d655-logs\") pod \"a8f8fbad-ac9b-4103-8370-9693c234d655\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.454287 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-scripts\") pod \"a8f8fbad-ac9b-4103-8370-9693c234d655\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.454332 4682 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-config-data\") pod \"a8f8fbad-ac9b-4103-8370-9693c234d655\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.454366 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fng64\" (UniqueName: \"kubernetes.io/projected/a8f8fbad-ac9b-4103-8370-9693c234d655-kube-api-access-fng64\") pod \"a8f8fbad-ac9b-4103-8370-9693c234d655\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.454445 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-combined-ca-bundle\") pod \"a8f8fbad-ac9b-4103-8370-9693c234d655\" (UID: \"a8f8fbad-ac9b-4103-8370-9693c234d655\") " Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.456007 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8f8fbad-ac9b-4103-8370-9693c234d655-logs" (OuterVolumeSpecName: "logs") pod "a8f8fbad-ac9b-4103-8370-9693c234d655" (UID: "a8f8fbad-ac9b-4103-8370-9693c234d655"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.456350 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8f8fbad-ac9b-4103-8370-9693c234d655-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a8f8fbad-ac9b-4103-8370-9693c234d655" (UID: "a8f8fbad-ac9b-4103-8370-9693c234d655"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.496999 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-scripts" (OuterVolumeSpecName: "scripts") pod "a8f8fbad-ac9b-4103-8370-9693c234d655" (UID: "a8f8fbad-ac9b-4103-8370-9693c234d655"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.497392 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8f8fbad-ac9b-4103-8370-9693c234d655-kube-api-access-fng64" (OuterVolumeSpecName: "kube-api-access-fng64") pod "a8f8fbad-ac9b-4103-8370-9693c234d655" (UID: "a8f8fbad-ac9b-4103-8370-9693c234d655"). InnerVolumeSpecName "kube-api-access-fng64". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.575466 4682 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8f8fbad-ac9b-4103-8370-9693c234d655-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.575730 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.575742 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fng64\" (UniqueName: \"kubernetes.io/projected/a8f8fbad-ac9b-4103-8370-9693c234d655-kube-api-access-fng64\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.575753 4682 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a8f8fbad-ac9b-4103-8370-9693c234d655-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.587611 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a8f8fbad-ac9b-4103-8370-9693c234d655" (UID: "a8f8fbad-ac9b-4103-8370-9693c234d655"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.677942 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.742018 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dfvhs" event={"ID":"636ce24d-c743-4ca9-b253-8c5da3d9f7c8","Type":"ContainerStarted","Data":"3e3a064cf5a84b5b5a9421d80d6a2e5668f6691ffe126fa9c89a22cc03f359f6"} Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.742960 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-dtzdg" event={"ID":"19e9c14e-416c-4f11-96ff-d2ccdac04cdf","Type":"ContainerStarted","Data":"a73122e741fa499f1fecc2117e840ca8040b795c78d3dc01e0cce2a8cef17d36"} Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.744582 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1aa28b0-107b-411c-a7c1-9646565c49a9","Type":"ContainerStarted","Data":"0b4e3fc6d6e7968a5e3cb3be8194bdbfd190c6e0694ce2b9e4dd5992aac8e131"} Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.752059 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a8f8fbad-ac9b-4103-8370-9693c234d655","Type":"ContainerDied","Data":"4f578b25f7d27ccbee2e222c3b295d0d7d903b077a29e88583177ae93e56129b"} Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.752433 4682 scope.go:117] "RemoveContainer" containerID="c07a004f8ebf4a18c4b2cd5c2c652e998b5169cb8dd7a1d921bc1b8a41a069e7" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.752585 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.756631 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9" (OuterVolumeSpecName: "glance") pod "a8f8fbad-ac9b-4103-8370-9693c234d655" (UID: "a8f8fbad-ac9b-4103-8370-9693c234d655"). InnerVolumeSpecName "pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.766248 4682 generic.go:334] "Generic (PLEG): container finished" podID="72889b73-e773-4559-800e-a87032e35a05" containerID="1f2f1769d31823f996453883380bcdc3a63510f9032ad956230c5077e26381df" exitCode=0 Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.766310 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"72889b73-e773-4559-800e-a87032e35a05","Type":"ContainerDied","Data":"1f2f1769d31823f996453883380bcdc3a63510f9032ad956230c5077e26381df"} Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.768743 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-7081-account-create-update-tgr2n" event={"ID":"e060569d-f156-4f2e-9796-e304b2d2be0d","Type":"ContainerStarted","Data":"05adc622a9cf5387337e6f2d4a6dfa9881cae21384c8fc55c39baa389ac4a8fe"} Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.773813 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1f8c-account-create-update-lr7tf" event={"ID":"6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4","Type":"ContainerStarted","Data":"43267757b96d8afd464ee7a2619cd6020430ba44458a47f23f3f355dbe902c0a"} Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.779902 4682 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") on node \"crc\" " Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.780052 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-xkg9x" event={"ID":"3adb294d-5fbd-4f36-b324-8e99e2e22cee","Type":"ContainerStarted","Data":"e2b142a67f4c8e39e7349dbc3a7abd5c6d6b431d4489b59f2b7a472d101ca902"} Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.781829 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f97b-account-create-update-97hvk" event={"ID":"695c9c33-02b4-4ba2-86d6-a6def1e67513","Type":"ContainerStarted","Data":"0baca866fff02b7e3ff6f983ed1e14551917a80f3170f4f93ab853288c0d194c"} Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.833207 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-xkg9x" podStartSLOduration=7.833186234 podStartE2EDuration="7.833186234s" podCreationTimestamp="2025-12-10 11:08:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:08:32.806431004 +0000 UTC m=+1393.126641864" watchObservedRunningTime="2025-12-10 11:08:32.833186234 +0000 UTC m=+1393.153396984" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.863876 4682 scope.go:117] "RemoveContainer" containerID="841f903c864af41eb790a2eeb05e5894e8a041bc91027390add3e985943784a9" Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.989281 4682 
csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Dec 10 11:08:32 crc kubenswrapper[4682]: I1210 11:08:32.992836 4682 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9") on node "crc" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.017215 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a8f8fbad-ac9b-4103-8370-9693c234d655" (UID: "a8f8fbad-ac9b-4103-8370-9693c234d655"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.087775 4682 reconciler_common.go:293] "Volume detached for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.087822 4682 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.110760 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-config-data" (OuterVolumeSpecName: "config-data") pod "a8f8fbad-ac9b-4103-8370-9693c234d655" (UID: "a8f8fbad-ac9b-4103-8370-9693c234d655"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.189256 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8f8fbad-ac9b-4103-8370-9693c234d655-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.282806 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.409266 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") pod \"72889b73-e773-4559-800e-a87032e35a05\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.409611 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-scripts\") pod \"72889b73-e773-4559-800e-a87032e35a05\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.409702 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/72889b73-e773-4559-800e-a87032e35a05-logs\") pod \"72889b73-e773-4559-800e-a87032e35a05\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.409753 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-internal-tls-certs\") pod \"72889b73-e773-4559-800e-a87032e35a05\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.409804 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/72889b73-e773-4559-800e-a87032e35a05-httpd-run\") pod \"72889b73-e773-4559-800e-a87032e35a05\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.409880 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-combined-ca-bundle\") pod \"72889b73-e773-4559-800e-a87032e35a05\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.409924 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4tsr\" (UniqueName: \"kubernetes.io/projected/72889b73-e773-4559-800e-a87032e35a05-kube-api-access-l4tsr\") pod \"72889b73-e773-4559-800e-a87032e35a05\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.409986 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-config-data\") pod \"72889b73-e773-4559-800e-a87032e35a05\" (UID: \"72889b73-e773-4559-800e-a87032e35a05\") " Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.414012 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72889b73-e773-4559-800e-a87032e35a05-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "72889b73-e773-4559-800e-a87032e35a05" (UID: "72889b73-e773-4559-800e-a87032e35a05"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.427309 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.438669 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72889b73-e773-4559-800e-a87032e35a05-logs" (OuterVolumeSpecName: "logs") pod "72889b73-e773-4559-800e-a87032e35a05" (UID: "72889b73-e773-4559-800e-a87032e35a05"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.440664 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-scripts" (OuterVolumeSpecName: "scripts") pod "72889b73-e773-4559-800e-a87032e35a05" (UID: "72889b73-e773-4559-800e-a87032e35a05"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.449199 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72889b73-e773-4559-800e-a87032e35a05-kube-api-access-l4tsr" (OuterVolumeSpecName: "kube-api-access-l4tsr") pod "72889b73-e773-4559-800e-a87032e35a05" (UID: "72889b73-e773-4559-800e-a87032e35a05"). InnerVolumeSpecName "kube-api-access-l4tsr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.458579 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.466368 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52" (OuterVolumeSpecName: "glance") pod "72889b73-e773-4559-800e-a87032e35a05" (UID: "72889b73-e773-4559-800e-a87032e35a05"). InnerVolumeSpecName "pvc-7787cbf6-7249-471e-a024-697e395dbc52". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.472931 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:08:33 crc kubenswrapper[4682]: E1210 11:08:33.473458 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72889b73-e773-4559-800e-a87032e35a05" containerName="glance-log" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.473493 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="72889b73-e773-4559-800e-a87032e35a05" containerName="glance-log" Dec 10 11:08:33 crc kubenswrapper[4682]: E1210 11:08:33.473511 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8f8fbad-ac9b-4103-8370-9693c234d655" containerName="glance-log" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.473519 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8f8fbad-ac9b-4103-8370-9693c234d655" containerName="glance-log" Dec 10 11:08:33 crc kubenswrapper[4682]: E1210 11:08:33.473557 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8f8fbad-ac9b-4103-8370-9693c234d655" containerName="glance-httpd" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.473565 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8f8fbad-ac9b-4103-8370-9693c234d655" containerName="glance-httpd" Dec 10 11:08:33 crc kubenswrapper[4682]: E1210 11:08:33.473577 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72889b73-e773-4559-800e-a87032e35a05" containerName="glance-httpd" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.473586 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="72889b73-e773-4559-800e-a87032e35a05" containerName="glance-httpd" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.473861 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8f8fbad-ac9b-4103-8370-9693c234d655" containerName="glance-httpd" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.473885 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8f8fbad-ac9b-4103-8370-9693c234d655" containerName="glance-log" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.473896 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="72889b73-e773-4559-800e-a87032e35a05" containerName="glance-httpd" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.473909 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="72889b73-e773-4559-800e-a87032e35a05" containerName="glance-log" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.476708 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.480150 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.480401 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.514654 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.516104 4682 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") on node \"crc\" " Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.516125 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.516137 4682 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/72889b73-e773-4559-800e-a87032e35a05-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.516145 4682 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/72889b73-e773-4559-800e-a87032e35a05-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.516154 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4tsr\" (UniqueName: \"kubernetes.io/projected/72889b73-e773-4559-800e-a87032e35a05-kube-api-access-l4tsr\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.516934 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "72889b73-e773-4559-800e-a87032e35a05" (UID: "72889b73-e773-4559-800e-a87032e35a05"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.545052 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-config-data" (OuterVolumeSpecName: "config-data") pod "72889b73-e773-4559-800e-a87032e35a05" (UID: "72889b73-e773-4559-800e-a87032e35a05"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.584643 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "72889b73-e773-4559-800e-a87032e35a05" (UID: "72889b73-e773-4559-800e-a87032e35a05"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.587740 4682 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.587902 4682 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-7787cbf6-7249-471e-a024-697e395dbc52" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52") on node "crc" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.618930 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13896178-eabd-4d1a-ad7c-8763c9b4f396-config-data\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.618987 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/13896178-eabd-4d1a-ad7c-8763c9b4f396-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.619019 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13896178-eabd-4d1a-ad7c-8763c9b4f396-logs\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.619060 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.619092 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pcxb\" (UniqueName: \"kubernetes.io/projected/13896178-eabd-4d1a-ad7c-8763c9b4f396-kube-api-access-5pcxb\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.619147 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13896178-eabd-4d1a-ad7c-8763c9b4f396-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.619223 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/13896178-eabd-4d1a-ad7c-8763c9b4f396-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.619281 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13896178-eabd-4d1a-ad7c-8763c9b4f396-scripts\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " 
pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.619394 4682 reconciler_common.go:293] "Volume detached for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.619409 4682 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.619422 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.619434 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72889b73-e773-4559-800e-a87032e35a05-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.720538 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.720625 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pcxb\" (UniqueName: \"kubernetes.io/projected/13896178-eabd-4d1a-ad7c-8763c9b4f396-kube-api-access-5pcxb\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.720684 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13896178-eabd-4d1a-ad7c-8763c9b4f396-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.720726 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/13896178-eabd-4d1a-ad7c-8763c9b4f396-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.720770 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13896178-eabd-4d1a-ad7c-8763c9b4f396-scripts\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.720907 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13896178-eabd-4d1a-ad7c-8763c9b4f396-config-data\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 
11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.720945 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/13896178-eabd-4d1a-ad7c-8763c9b4f396-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.720973 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13896178-eabd-4d1a-ad7c-8763c9b4f396-logs\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.721644 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13896178-eabd-4d1a-ad7c-8763c9b4f396-logs\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.722102 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/13896178-eabd-4d1a-ad7c-8763c9b4f396-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.725921 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13896178-eabd-4d1a-ad7c-8763c9b4f396-scripts\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.726023 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13896178-eabd-4d1a-ad7c-8763c9b4f396-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.726372 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/13896178-eabd-4d1a-ad7c-8763c9b4f396-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.727430 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13896178-eabd-4d1a-ad7c-8763c9b4f396-config-data\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.730003 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.730047 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/7b1923bf8bb403c24020ed876074f9fa5ba6aaf35e09637f2443da6ac1e5868a/globalmount\"" pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.751133 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pcxb\" (UniqueName: \"kubernetes.io/projected/13896178-eabd-4d1a-ad7c-8763c9b4f396-kube-api-access-5pcxb\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.835740 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bf54519-53a3-4ccc-ab18-375ffea0a7a9\") pod \"glance-default-external-api-0\" (UID: \"13896178-eabd-4d1a-ad7c-8763c9b4f396\") " pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.843970 4682 generic.go:334] "Generic (PLEG): container finished" podID="6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4" containerID="79ef7bdfd497ae2ccedaab4746d45f88e415142a4c8d35d3d46f898712f41ae8" exitCode=0 Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.844099 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1f8c-account-create-update-lr7tf" event={"ID":"6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4","Type":"ContainerDied","Data":"79ef7bdfd497ae2ccedaab4746d45f88e415142a4c8d35d3d46f898712f41ae8"} Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.848017 4682 generic.go:334] "Generic (PLEG): container finished" podID="3adb294d-5fbd-4f36-b324-8e99e2e22cee" containerID="89acc5342582652ee325e0bd473ede0c367f8d632a5988e4bd2ca363b38195c4" exitCode=0 Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.848096 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-xkg9x" event={"ID":"3adb294d-5fbd-4f36-b324-8e99e2e22cee","Type":"ContainerDied","Data":"89acc5342582652ee325e0bd473ede0c367f8d632a5988e4bd2ca363b38195c4"} Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.857265 4682 generic.go:334] "Generic (PLEG): container finished" podID="695c9c33-02b4-4ba2-86d6-a6def1e67513" containerID="0baca866fff02b7e3ff6f983ed1e14551917a80f3170f4f93ab853288c0d194c" exitCode=0 Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.857333 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f97b-account-create-update-97hvk" event={"ID":"695c9c33-02b4-4ba2-86d6-a6def1e67513","Type":"ContainerDied","Data":"0baca866fff02b7e3ff6f983ed1e14551917a80f3170f4f93ab853288c0d194c"} Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.883105 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.883098 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"72889b73-e773-4559-800e-a87032e35a05","Type":"ContainerDied","Data":"512585cef0ad0eb65ff7e42f576a8fa48fc2e6da2bd9b2a7d1bb4eff98b273fc"} Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.883271 4682 scope.go:117] "RemoveContainer" containerID="1f2f1769d31823f996453883380bcdc3a63510f9032ad956230c5077e26381df" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.895781 4682 generic.go:334] "Generic (PLEG): container finished" podID="636ce24d-c743-4ca9-b253-8c5da3d9f7c8" containerID="f0121a462490bc1118c76bc208b7eef1a591d0918971ab3b1ecfcd2e70fa24b5" exitCode=0 Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.895882 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dfvhs" event={"ID":"636ce24d-c743-4ca9-b253-8c5da3d9f7c8","Type":"ContainerDied","Data":"f0121a462490bc1118c76bc208b7eef1a591d0918971ab3b1ecfcd2e70fa24b5"} Dec 10 11:08:33 crc kubenswrapper[4682]: E1210 11:08:33.901827 4682 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54a0138b_bd8c_4f9c_8858_7c8b41798e5e.slice/crio-744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod636ce24d_c743_4ca9_b253_8c5da3d9f7c8.slice/crio-f0121a462490bc1118c76bc208b7eef1a591d0918971ab3b1ecfcd2e70fa24b5.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod636ce24d_c743_4ca9_b253_8c5da3d9f7c8.slice/crio-conmon-f0121a462490bc1118c76bc208b7eef1a591d0918971ab3b1ecfcd2e70fa24b5.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode060569d_f156_4f2e_9796_e304b2d2be0d.slice/crio-cfacf6d7fa9e8a9e4fb96f62f3e5663cd2958841cca20f01714826dc88e85369.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3adb294d_5fbd_4f36_b324_8e99e2e22cee.slice/crio-89acc5342582652ee325e0bd473ede0c367f8d632a5988e4bd2ca363b38195c4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c8ebd3b_4d1e_45b3_92b3_577fe7d64dd4.slice/crio-conmon-79ef7bdfd497ae2ccedaab4746d45f88e415142a4c8d35d3d46f898712f41ae8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54a0138b_bd8c_4f9c_8858_7c8b41798e5e.slice/crio-conmon-744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.916795 4682 generic.go:334] "Generic (PLEG): container finished" podID="19e9c14e-416c-4f11-96ff-d2ccdac04cdf" containerID="6054d3c423fb5a8fd1aba35c17d8c5de6ad5f45ec02ff5058eeb5e4bcebcb7dc" exitCode=0 Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.916912 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-dtzdg" 
event={"ID":"19e9c14e-416c-4f11-96ff-d2ccdac04cdf","Type":"ContainerDied","Data":"6054d3c423fb5a8fd1aba35c17d8c5de6ad5f45ec02ff5058eeb5e4bcebcb7dc"} Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.931889 4682 generic.go:334] "Generic (PLEG): container finished" podID="e060569d-f156-4f2e-9796-e304b2d2be0d" containerID="cfacf6d7fa9e8a9e4fb96f62f3e5663cd2958841cca20f01714826dc88e85369" exitCode=0 Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.931948 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-7081-account-create-update-tgr2n" event={"ID":"e060569d-f156-4f2e-9796-e304b2d2be0d","Type":"ContainerDied","Data":"cfacf6d7fa9e8a9e4fb96f62f3e5663cd2958841cca20f01714826dc88e85369"} Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.943286 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.962712 4682 scope.go:117] "RemoveContainer" containerID="bbb7dbed30b8d9ae6765192592f9188ce3c230c04639e698ed66bbb3655bdcaa" Dec 10 11:08:33 crc kubenswrapper[4682]: I1210 11:08:33.987614 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.005624 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.057959 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.061803 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.065900 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.066282 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.110316 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.133793 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38946e9d-e072-4758-8afb-dbdafdec204d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.133832 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38946e9d-e072-4758-8afb-dbdafdec204d-logs\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.134040 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6qwx\" (UniqueName: \"kubernetes.io/projected/38946e9d-e072-4758-8afb-dbdafdec204d-kube-api-access-w6qwx\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: 
I1210 11:08:34.134114 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38946e9d-e072-4758-8afb-dbdafdec204d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.134172 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/38946e9d-e072-4758-8afb-dbdafdec204d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.134198 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.134247 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38946e9d-e072-4758-8afb-dbdafdec204d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.134297 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38946e9d-e072-4758-8afb-dbdafdec204d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.237066 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38946e9d-e072-4758-8afb-dbdafdec204d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.237253 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38946e9d-e072-4758-8afb-dbdafdec204d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.237281 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38946e9d-e072-4758-8afb-dbdafdec204d-logs\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.237435 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6qwx\" (UniqueName: \"kubernetes.io/projected/38946e9d-e072-4758-8afb-dbdafdec204d-kube-api-access-w6qwx\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " 
pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.237497 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38946e9d-e072-4758-8afb-dbdafdec204d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.237541 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/38946e9d-e072-4758-8afb-dbdafdec204d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.237563 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.237600 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38946e9d-e072-4758-8afb-dbdafdec204d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.239211 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/38946e9d-e072-4758-8afb-dbdafdec204d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.239639 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/38946e9d-e072-4758-8afb-dbdafdec204d-logs\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.243628 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.243839 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/eee35181b01d2e9acbd6a7670c690b29128fb0f1ac4a3b3e7ea6260a2e4780e5/globalmount\"" pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.249311 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38946e9d-e072-4758-8afb-dbdafdec204d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.249311 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38946e9d-e072-4758-8afb-dbdafdec204d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.251418 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38946e9d-e072-4758-8afb-dbdafdec204d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.260244 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38946e9d-e072-4758-8afb-dbdafdec204d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.262234 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6qwx\" (UniqueName: \"kubernetes.io/projected/38946e9d-e072-4758-8afb-dbdafdec204d-kube-api-access-w6qwx\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.326600 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7787cbf6-7249-471e-a024-697e395dbc52\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7787cbf6-7249-471e-a024-697e395dbc52\") pod \"glance-default-internal-api-0\" (UID: \"38946e9d-e072-4758-8afb-dbdafdec204d\") " pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.404296 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72889b73-e773-4559-800e-a87032e35a05" path="/var/lib/kubelet/pods/72889b73-e773-4559-800e-a87032e35a05/volumes" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.405032 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8f8fbad-ac9b-4103-8370-9693c234d655" path="/var/lib/kubelet/pods/a8f8fbad-ac9b-4103-8370-9693c234d655/volumes" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.427527 4682 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openstack/nova-api-f97b-account-create-update-97hvk" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.448771 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.544523 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/695c9c33-02b4-4ba2-86d6-a6def1e67513-operator-scripts\") pod \"695c9c33-02b4-4ba2-86d6-a6def1e67513\" (UID: \"695c9c33-02b4-4ba2-86d6-a6def1e67513\") " Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.544862 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5hn8\" (UniqueName: \"kubernetes.io/projected/695c9c33-02b4-4ba2-86d6-a6def1e67513-kube-api-access-f5hn8\") pod \"695c9c33-02b4-4ba2-86d6-a6def1e67513\" (UID: \"695c9c33-02b4-4ba2-86d6-a6def1e67513\") " Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.546408 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/695c9c33-02b4-4ba2-86d6-a6def1e67513-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "695c9c33-02b4-4ba2-86d6-a6def1e67513" (UID: "695c9c33-02b4-4ba2-86d6-a6def1e67513"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.550529 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/695c9c33-02b4-4ba2-86d6-a6def1e67513-kube-api-access-f5hn8" (OuterVolumeSpecName: "kube-api-access-f5hn8") pod "695c9c33-02b4-4ba2-86d6-a6def1e67513" (UID: "695c9c33-02b4-4ba2-86d6-a6def1e67513"). InnerVolumeSpecName "kube-api-access-f5hn8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.647257 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5hn8\" (UniqueName: \"kubernetes.io/projected/695c9c33-02b4-4ba2-86d6-a6def1e67513-kube-api-access-f5hn8\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.647625 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/695c9c33-02b4-4ba2-86d6-a6def1e67513-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.717117 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 10 11:08:34 crc kubenswrapper[4682]: W1210 11:08:34.735028 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod13896178_eabd_4d1a_ad7c_8763c9b4f396.slice/crio-712d6cb4bff2943ff6320c726d5e2ae73fcef516f0ca48ff081e3645871478f9 WatchSource:0}: Error finding container 712d6cb4bff2943ff6320c726d5e2ae73fcef516f0ca48ff081e3645871478f9: Status 404 returned error can't find the container with id 712d6cb4bff2943ff6320c726d5e2ae73fcef516f0ca48ff081e3645871478f9 Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.973787 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1aa28b0-107b-411c-a7c1-9646565c49a9","Type":"ContainerStarted","Data":"e2e01911ba9821d3565cdcea313ef52472a4bb4cbc1e4772548dcb6a1eba20b4"} Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.974351 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1aa28b0-107b-411c-a7c1-9646565c49a9","Type":"ContainerStarted","Data":"24eaad1d4616ad1ff9dcde6aed47b89b45f1fdc8c5d790c07786ea483e9243f2"} Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.990759 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f97b-account-create-update-97hvk" event={"ID":"695c9c33-02b4-4ba2-86d6-a6def1e67513","Type":"ContainerDied","Data":"0ab79430a38612f960ad1c222034fc52c0932c97f823a877e805990681a644c5"} Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.990802 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ab79430a38612f960ad1c222034fc52c0932c97f823a877e805990681a644c5" Dec 10 11:08:34 crc kubenswrapper[4682]: I1210 11:08:34.990880 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f97b-account-create-update-97hvk" Dec 10 11:08:35 crc kubenswrapper[4682]: I1210 11:08:35.008888 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"13896178-eabd-4d1a-ad7c-8763c9b4f396","Type":"ContainerStarted","Data":"712d6cb4bff2943ff6320c726d5e2ae73fcef516f0ca48ff081e3645871478f9"} Dec 10 11:08:35 crc kubenswrapper[4682]: I1210 11:08:35.377723 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 10 11:08:35 crc kubenswrapper[4682]: I1210 11:08:35.908638 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-7081-account-create-update-tgr2n" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.008713 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e060569d-f156-4f2e-9796-e304b2d2be0d-operator-scripts\") pod \"e060569d-f156-4f2e-9796-e304b2d2be0d\" (UID: \"e060569d-f156-4f2e-9796-e304b2d2be0d\") " Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.010696 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msxlt\" (UniqueName: \"kubernetes.io/projected/e060569d-f156-4f2e-9796-e304b2d2be0d-kube-api-access-msxlt\") pod \"e060569d-f156-4f2e-9796-e304b2d2be0d\" (UID: \"e060569d-f156-4f2e-9796-e304b2d2be0d\") " Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.013106 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e060569d-f156-4f2e-9796-e304b2d2be0d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e060569d-f156-4f2e-9796-e304b2d2be0d" (UID: "e060569d-f156-4f2e-9796-e304b2d2be0d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.019349 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e060569d-f156-4f2e-9796-e304b2d2be0d-kube-api-access-msxlt" (OuterVolumeSpecName: "kube-api-access-msxlt") pod "e060569d-f156-4f2e-9796-e304b2d2be0d" (UID: "e060569d-f156-4f2e-9796-e304b2d2be0d"). InnerVolumeSpecName "kube-api-access-msxlt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.079655 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-7081-account-create-update-tgr2n" event={"ID":"e060569d-f156-4f2e-9796-e304b2d2be0d","Type":"ContainerDied","Data":"05adc622a9cf5387337e6f2d4a6dfa9881cae21384c8fc55c39baa389ac4a8fe"} Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.079696 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05adc622a9cf5387337e6f2d4a6dfa9881cae21384c8fc55c39baa389ac4a8fe" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.079753 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-7081-account-create-update-tgr2n" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.121366 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msxlt\" (UniqueName: \"kubernetes.io/projected/e060569d-f156-4f2e-9796-e304b2d2be0d-kube-api-access-msxlt\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.121404 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e060569d-f156-4f2e-9796-e304b2d2be0d-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.126782 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"38946e9d-e072-4758-8afb-dbdafdec204d","Type":"ContainerStarted","Data":"40ac843fe9bfaa77c52a5ee26788eeac2c7f1912a1f867f851330c00c3d34b0a"} Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.431600 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-xkg9x" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.435809 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1f8c-account-create-update-lr7tf" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.441114 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f694p\" (UniqueName: \"kubernetes.io/projected/6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4-kube-api-access-f694p\") pod \"6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4\" (UID: \"6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4\") " Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.441345 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qv896\" (UniqueName: \"kubernetes.io/projected/3adb294d-5fbd-4f36-b324-8e99e2e22cee-kube-api-access-qv896\") pod \"3adb294d-5fbd-4f36-b324-8e99e2e22cee\" (UID: \"3adb294d-5fbd-4f36-b324-8e99e2e22cee\") " Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.441454 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3adb294d-5fbd-4f36-b324-8e99e2e22cee-operator-scripts\") pod \"3adb294d-5fbd-4f36-b324-8e99e2e22cee\" (UID: \"3adb294d-5fbd-4f36-b324-8e99e2e22cee\") " Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.441599 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4-operator-scripts\") pod \"6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4\" (UID: \"6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4\") " Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.443940 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3adb294d-5fbd-4f36-b324-8e99e2e22cee-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3adb294d-5fbd-4f36-b324-8e99e2e22cee" (UID: "3adb294d-5fbd-4f36-b324-8e99e2e22cee"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.453807 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4-kube-api-access-f694p" (OuterVolumeSpecName: "kube-api-access-f694p") pod "6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4" (UID: "6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4"). InnerVolumeSpecName "kube-api-access-f694p". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.459755 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4" (UID: "6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.462812 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3adb294d-5fbd-4f36-b324-8e99e2e22cee-kube-api-access-qv896" (OuterVolumeSpecName: "kube-api-access-qv896") pod "3adb294d-5fbd-4f36-b324-8e99e2e22cee" (UID: "3adb294d-5fbd-4f36-b324-8e99e2e22cee"). InnerVolumeSpecName "kube-api-access-qv896". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.478074 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-dtzdg" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.487007 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.487056 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.487098 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.487914 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9d4f095c608a9033903a024629d6bdbb8e05d5ec10f831b06e26d70cfeb1c556"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.487972 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://9d4f095c608a9033903a024629d6bdbb8e05d5ec10f831b06e26d70cfeb1c556" gracePeriod=600 Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.498492 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-dfvhs" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.543387 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8mdm\" (UniqueName: \"kubernetes.io/projected/636ce24d-c743-4ca9-b253-8c5da3d9f7c8-kube-api-access-v8mdm\") pod \"636ce24d-c743-4ca9-b253-8c5da3d9f7c8\" (UID: \"636ce24d-c743-4ca9-b253-8c5da3d9f7c8\") " Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.543538 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/636ce24d-c743-4ca9-b253-8c5da3d9f7c8-operator-scripts\") pod \"636ce24d-c743-4ca9-b253-8c5da3d9f7c8\" (UID: \"636ce24d-c743-4ca9-b253-8c5da3d9f7c8\") " Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.543594 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19e9c14e-416c-4f11-96ff-d2ccdac04cdf-operator-scripts\") pod \"19e9c14e-416c-4f11-96ff-d2ccdac04cdf\" (UID: \"19e9c14e-416c-4f11-96ff-d2ccdac04cdf\") " Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.543660 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvcv5\" (UniqueName: \"kubernetes.io/projected/19e9c14e-416c-4f11-96ff-d2ccdac04cdf-kube-api-access-vvcv5\") pod \"19e9c14e-416c-4f11-96ff-d2ccdac04cdf\" (UID: \"19e9c14e-416c-4f11-96ff-d2ccdac04cdf\") " Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.544179 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qv896\" (UniqueName: \"kubernetes.io/projected/3adb294d-5fbd-4f36-b324-8e99e2e22cee-kube-api-access-qv896\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.544196 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3adb294d-5fbd-4f36-b324-8e99e2e22cee-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.544205 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.544222 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f694p\" (UniqueName: \"kubernetes.io/projected/6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4-kube-api-access-f694p\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.549758 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19e9c14e-416c-4f11-96ff-d2ccdac04cdf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "19e9c14e-416c-4f11-96ff-d2ccdac04cdf" (UID: "19e9c14e-416c-4f11-96ff-d2ccdac04cdf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.549903 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/636ce24d-c743-4ca9-b253-8c5da3d9f7c8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "636ce24d-c743-4ca9-b253-8c5da3d9f7c8" (UID: "636ce24d-c743-4ca9-b253-8c5da3d9f7c8"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.561768 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/636ce24d-c743-4ca9-b253-8c5da3d9f7c8-kube-api-access-v8mdm" (OuterVolumeSpecName: "kube-api-access-v8mdm") pod "636ce24d-c743-4ca9-b253-8c5da3d9f7c8" (UID: "636ce24d-c743-4ca9-b253-8c5da3d9f7c8"). InnerVolumeSpecName "kube-api-access-v8mdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.570449 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19e9c14e-416c-4f11-96ff-d2ccdac04cdf-kube-api-access-vvcv5" (OuterVolumeSpecName: "kube-api-access-vvcv5") pod "19e9c14e-416c-4f11-96ff-d2ccdac04cdf" (UID: "19e9c14e-416c-4f11-96ff-d2ccdac04cdf"). InnerVolumeSpecName "kube-api-access-vvcv5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.653653 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8mdm\" (UniqueName: \"kubernetes.io/projected/636ce24d-c743-4ca9-b253-8c5da3d9f7c8-kube-api-access-v8mdm\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.653955 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/636ce24d-c743-4ca9-b253-8c5da3d9f7c8-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.653967 4682 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19e9c14e-416c-4f11-96ff-d2ccdac04cdf-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:36 crc kubenswrapper[4682]: I1210 11:08:36.653976 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvcv5\" (UniqueName: \"kubernetes.io/projected/19e9c14e-416c-4f11-96ff-d2ccdac04cdf-kube-api-access-vvcv5\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.164934 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="9d4f095c608a9033903a024629d6bdbb8e05d5ec10f831b06e26d70cfeb1c556" exitCode=0 Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.165062 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"9d4f095c608a9033903a024629d6bdbb8e05d5ec10f831b06e26d70cfeb1c556"} Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.165091 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f"} Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.165109 4682 scope.go:117] "RemoveContainer" containerID="a87379aa7407b916521958c3640f1cf7fec14e9fe313d9dbea26901e472ba31c" Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.171586 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-xkg9x" Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.171767 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-xkg9x" event={"ID":"3adb294d-5fbd-4f36-b324-8e99e2e22cee","Type":"ContainerDied","Data":"e2b142a67f4c8e39e7349dbc3a7abd5c6d6b431d4489b59f2b7a472d101ca902"} Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.171799 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2b142a67f4c8e39e7349dbc3a7abd5c6d6b431d4489b59f2b7a472d101ca902" Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.174218 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"13896178-eabd-4d1a-ad7c-8763c9b4f396","Type":"ContainerStarted","Data":"8c9c5dc78e36a4bcf9d80e229de55ab2348be04fc9bcb69a1407d48e3bd04888"} Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.178435 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dfvhs" event={"ID":"636ce24d-c743-4ca9-b253-8c5da3d9f7c8","Type":"ContainerDied","Data":"3e3a064cf5a84b5b5a9421d80d6a2e5668f6691ffe126fa9c89a22cc03f359f6"} Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.178486 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e3a064cf5a84b5b5a9421d80d6a2e5668f6691ffe126fa9c89a22cc03f359f6" Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.178555 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dfvhs" Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.197367 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-dtzdg" Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.200874 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-dtzdg" event={"ID":"19e9c14e-416c-4f11-96ff-d2ccdac04cdf","Type":"ContainerDied","Data":"a73122e741fa499f1fecc2117e840ca8040b795c78d3dc01e0cce2a8cef17d36"} Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.200915 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a73122e741fa499f1fecc2117e840ca8040b795c78d3dc01e0cce2a8cef17d36" Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.228305 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"38946e9d-e072-4758-8afb-dbdafdec204d","Type":"ContainerStarted","Data":"ee5e9be39bfd92ae024a7eacf5dc98e760f5e09b819a81fc1a1a1d78df3600c0"} Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.237942 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1aa28b0-107b-411c-a7c1-9646565c49a9","Type":"ContainerStarted","Data":"d82817bf6c16ddef7569a14aa92e037db71d3d091f03fbcbd9811551a183d788"} Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.242612 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1f8c-account-create-update-lr7tf" event={"ID":"6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4","Type":"ContainerDied","Data":"43267757b96d8afd464ee7a2619cd6020430ba44458a47f23f3f355dbe902c0a"} Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 11:08:37.242651 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="43267757b96d8afd464ee7a2619cd6020430ba44458a47f23f3f355dbe902c0a" Dec 10 11:08:37 crc kubenswrapper[4682]: I1210 
11:08:37.242704 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1f8c-account-create-update-lr7tf" Dec 10 11:08:38 crc kubenswrapper[4682]: I1210 11:08:38.265077 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"13896178-eabd-4d1a-ad7c-8763c9b4f396","Type":"ContainerStarted","Data":"2fdd3794735707c54fe43c6ce837c18207cc4b6d39377cc9262a882fbd069436"} Dec 10 11:08:38 crc kubenswrapper[4682]: I1210 11:08:38.332338 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.332316789 podStartE2EDuration="5.332316789s" podCreationTimestamp="2025-12-10 11:08:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:08:38.322230687 +0000 UTC m=+1398.642441437" watchObservedRunningTime="2025-12-10 11:08:38.332316789 +0000 UTC m=+1398.652527539" Dec 10 11:08:39 crc kubenswrapper[4682]: I1210 11:08:39.278648 4682 generic.go:334] "Generic (PLEG): container finished" podID="21cd26d9-3c93-42a2-b33a-c6c1a532806c" containerID="309f22141cec94b11a3b3c213ab0c7374559670c21a09884128a4b22676026ef" exitCode=137 Dec 10 11:08:39 crc kubenswrapper[4682]: I1210 11:08:39.279216 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-proc-0" event={"ID":"21cd26d9-3c93-42a2-b33a-c6c1a532806c","Type":"ContainerDied","Data":"309f22141cec94b11a3b3c213ab0c7374559670c21a09884128a4b22676026ef"} Dec 10 11:08:39 crc kubenswrapper[4682]: I1210 11:08:39.281507 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"38946e9d-e072-4758-8afb-dbdafdec204d","Type":"ContainerStarted","Data":"9b3213a2fe2a64309974fd9b6a2d95153054c07968a270e0ac39768fff864ea9"} Dec 10 11:08:39 crc kubenswrapper[4682]: I1210 11:08:39.284782 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="ceilometer-central-agent" containerID="cri-o://24eaad1d4616ad1ff9dcde6aed47b89b45f1fdc8c5d790c07786ea483e9243f2" gracePeriod=30 Dec 10 11:08:39 crc kubenswrapper[4682]: I1210 11:08:39.285072 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="proxy-httpd" containerID="cri-o://71ede49c91ea4da56f1e35d6bd09f92cd93fbf24f95e1bfb115efe80386156fa" gracePeriod=30 Dec 10 11:08:39 crc kubenswrapper[4682]: I1210 11:08:39.285088 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1aa28b0-107b-411c-a7c1-9646565c49a9","Type":"ContainerStarted","Data":"71ede49c91ea4da56f1e35d6bd09f92cd93fbf24f95e1bfb115efe80386156fa"} Dec 10 11:08:39 crc kubenswrapper[4682]: I1210 11:08:39.285119 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:08:39 crc kubenswrapper[4682]: I1210 11:08:39.285140 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="sg-core" containerID="cri-o://d82817bf6c16ddef7569a14aa92e037db71d3d091f03fbcbd9811551a183d788" gracePeriod=30 Dec 10 11:08:39 crc kubenswrapper[4682]: I1210 11:08:39.285177 4682 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/ceilometer-0" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="ceilometer-notification-agent" containerID="cri-o://e2e01911ba9821d3565cdcea313ef52472a4bb4cbc1e4772548dcb6a1eba20b4" gracePeriod=30 Dec 10 11:08:39 crc kubenswrapper[4682]: I1210 11:08:39.305868 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.305849912 podStartE2EDuration="6.305849912s" podCreationTimestamp="2025-12-10 11:08:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:08:39.30140578 +0000 UTC m=+1399.621616530" watchObservedRunningTime="2025-12-10 11:08:39.305849912 +0000 UTC m=+1399.626060662" Dec 10 11:08:39 crc kubenswrapper[4682]: I1210 11:08:39.333330 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=10.63013923 podStartE2EDuration="17.333309645s" podCreationTimestamp="2025-12-10 11:08:22 +0000 UTC" firstStartedPulling="2025-12-10 11:08:31.84722156 +0000 UTC m=+1392.167432310" lastFinishedPulling="2025-12-10 11:08:38.550391975 +0000 UTC m=+1398.870602725" observedRunningTime="2025-12-10 11:08:39.325449029 +0000 UTC m=+1399.645659789" watchObservedRunningTime="2025-12-10 11:08:39.333309645 +0000 UTC m=+1399.653520385" Dec 10 11:08:39 crc kubenswrapper[4682]: I1210 11:08:39.936152 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.065418 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-config-data\") pod \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.065598 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/21cd26d9-3c93-42a2-b33a-c6c1a532806c-certs\") pod \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.065712 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-config-data-custom\") pod \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.065741 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qwlnp\" (UniqueName: \"kubernetes.io/projected/21cd26d9-3c93-42a2-b33a-c6c1a532806c-kube-api-access-qwlnp\") pod \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.066013 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-combined-ca-bundle\") pod \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.066087 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-scripts\") pod \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\" (UID: \"21cd26d9-3c93-42a2-b33a-c6c1a532806c\") " Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.072406 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21cd26d9-3c93-42a2-b33a-c6c1a532806c-kube-api-access-qwlnp" (OuterVolumeSpecName: "kube-api-access-qwlnp") pod "21cd26d9-3c93-42a2-b33a-c6c1a532806c" (UID: "21cd26d9-3c93-42a2-b33a-c6c1a532806c"). InnerVolumeSpecName "kube-api-access-qwlnp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.072406 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "21cd26d9-3c93-42a2-b33a-c6c1a532806c" (UID: "21cd26d9-3c93-42a2-b33a-c6c1a532806c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.078696 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21cd26d9-3c93-42a2-b33a-c6c1a532806c-certs" (OuterVolumeSpecName: "certs") pod "21cd26d9-3c93-42a2-b33a-c6c1a532806c" (UID: "21cd26d9-3c93-42a2-b33a-c6c1a532806c"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.092011 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-scripts" (OuterVolumeSpecName: "scripts") pod "21cd26d9-3c93-42a2-b33a-c6c1a532806c" (UID: "21cd26d9-3c93-42a2-b33a-c6c1a532806c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.109563 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-config-data" (OuterVolumeSpecName: "config-data") pod "21cd26d9-3c93-42a2-b33a-c6c1a532806c" (UID: "21cd26d9-3c93-42a2-b33a-c6c1a532806c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.109953 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "21cd26d9-3c93-42a2-b33a-c6c1a532806c" (UID: "21cd26d9-3c93-42a2-b33a-c6c1a532806c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.169046 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.169078 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.169090 4682 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/projected/21cd26d9-3c93-42a2-b33a-c6c1a532806c-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.169098 4682 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.169106 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qwlnp\" (UniqueName: \"kubernetes.io/projected/21cd26d9-3c93-42a2-b33a-c6c1a532806c-kube-api-access-qwlnp\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.169115 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21cd26d9-3c93-42a2-b33a-c6c1a532806c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.296950 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-proc-0" event={"ID":"21cd26d9-3c93-42a2-b33a-c6c1a532806c","Type":"ContainerDied","Data":"b9d3f80587ff5405f092d905224a99a0532bb30d8af8f12c9b5579ace91ec2dd"} Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.296985 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.296999 4682 scope.go:117] "RemoveContainer" containerID="309f22141cec94b11a3b3c213ab0c7374559670c21a09884128a4b22676026ef" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.304445 4682 generic.go:334] "Generic (PLEG): container finished" podID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerID="71ede49c91ea4da56f1e35d6bd09f92cd93fbf24f95e1bfb115efe80386156fa" exitCode=0 Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.304498 4682 generic.go:334] "Generic (PLEG): container finished" podID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerID="d82817bf6c16ddef7569a14aa92e037db71d3d091f03fbcbd9811551a183d788" exitCode=2 Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.304509 4682 generic.go:334] "Generic (PLEG): container finished" podID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerID="e2e01911ba9821d3565cdcea313ef52472a4bb4cbc1e4772548dcb6a1eba20b4" exitCode=0 Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.304519 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1aa28b0-107b-411c-a7c1-9646565c49a9","Type":"ContainerDied","Data":"71ede49c91ea4da56f1e35d6bd09f92cd93fbf24f95e1bfb115efe80386156fa"} Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.304565 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1aa28b0-107b-411c-a7c1-9646565c49a9","Type":"ContainerDied","Data":"d82817bf6c16ddef7569a14aa92e037db71d3d091f03fbcbd9811551a183d788"} Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.304576 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1aa28b0-107b-411c-a7c1-9646565c49a9","Type":"ContainerDied","Data":"e2e01911ba9821d3565cdcea313ef52472a4bb4cbc1e4772548dcb6a1eba20b4"} Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.451782 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.466509 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.478560 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 10 11:08:40 crc kubenswrapper[4682]: E1210 11:08:40.479021 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="695c9c33-02b4-4ba2-86d6-a6def1e67513" containerName="mariadb-account-create-update" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.479038 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="695c9c33-02b4-4ba2-86d6-a6def1e67513" containerName="mariadb-account-create-update" Dec 10 11:08:40 crc kubenswrapper[4682]: E1210 11:08:40.479068 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19e9c14e-416c-4f11-96ff-d2ccdac04cdf" containerName="mariadb-database-create" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.479075 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="19e9c14e-416c-4f11-96ff-d2ccdac04cdf" containerName="mariadb-database-create" Dec 10 11:08:40 crc kubenswrapper[4682]: E1210 11:08:40.479087 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="636ce24d-c743-4ca9-b253-8c5da3d9f7c8" containerName="mariadb-database-create" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.479094 4682 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="636ce24d-c743-4ca9-b253-8c5da3d9f7c8" containerName="mariadb-database-create" Dec 10 11:08:40 crc kubenswrapper[4682]: E1210 11:08:40.479110 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3adb294d-5fbd-4f36-b324-8e99e2e22cee" containerName="mariadb-database-create" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.479116 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="3adb294d-5fbd-4f36-b324-8e99e2e22cee" containerName="mariadb-database-create" Dec 10 11:08:40 crc kubenswrapper[4682]: E1210 11:08:40.479122 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4" containerName="mariadb-account-create-update" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.479128 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4" containerName="mariadb-account-create-update" Dec 10 11:08:40 crc kubenswrapper[4682]: E1210 11:08:40.479143 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21cd26d9-3c93-42a2-b33a-c6c1a532806c" containerName="cloudkitty-proc" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.479151 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="21cd26d9-3c93-42a2-b33a-c6c1a532806c" containerName="cloudkitty-proc" Dec 10 11:08:40 crc kubenswrapper[4682]: E1210 11:08:40.479170 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e060569d-f156-4f2e-9796-e304b2d2be0d" containerName="mariadb-account-create-update" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.479178 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="e060569d-f156-4f2e-9796-e304b2d2be0d" containerName="mariadb-account-create-update" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.479379 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="19e9c14e-416c-4f11-96ff-d2ccdac04cdf" containerName="mariadb-database-create" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.479391 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4" containerName="mariadb-account-create-update" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.479407 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="695c9c33-02b4-4ba2-86d6-a6def1e67513" containerName="mariadb-account-create-update" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.479418 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="21cd26d9-3c93-42a2-b33a-c6c1a532806c" containerName="cloudkitty-proc" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.479425 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="636ce24d-c743-4ca9-b253-8c5da3d9f7c8" containerName="mariadb-database-create" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.479441 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="3adb294d-5fbd-4f36-b324-8e99e2e22cee" containerName="mariadb-database-create" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.479452 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="e060569d-f156-4f2e-9796-e304b2d2be0d" containerName="mariadb-account-create-update" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.480207 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.484862 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cloudkitty-proc-config-data" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.515306 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.608906 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/dd5568e0-970f-4053-a407-8cd3070630b8-certs\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.608978 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd5568e0-970f-4053-a407-8cd3070630b8-scripts\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.609041 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd5568e0-970f-4053-a407-8cd3070630b8-combined-ca-bundle\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.609120 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dd5568e0-970f-4053-a407-8cd3070630b8-config-data-custom\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.609168 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd5568e0-970f-4053-a407-8cd3070630b8-config-data\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.609232 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfnwq\" (UniqueName: \"kubernetes.io/projected/dd5568e0-970f-4053-a407-8cd3070630b8-kube-api-access-wfnwq\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.711132 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfnwq\" (UniqueName: \"kubernetes.io/projected/dd5568e0-970f-4053-a407-8cd3070630b8-kube-api-access-wfnwq\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.711241 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/dd5568e0-970f-4053-a407-8cd3070630b8-certs\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.711272 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/dd5568e0-970f-4053-a407-8cd3070630b8-scripts\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.711306 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd5568e0-970f-4053-a407-8cd3070630b8-combined-ca-bundle\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.711340 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dd5568e0-970f-4053-a407-8cd3070630b8-config-data-custom\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.711375 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd5568e0-970f-4053-a407-8cd3070630b8-config-data\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.728766 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd5568e0-970f-4053-a407-8cd3070630b8-scripts\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.728805 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/projected/dd5568e0-970f-4053-a407-8cd3070630b8-certs\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.728848 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dd5568e0-970f-4053-a407-8cd3070630b8-config-data-custom\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.729220 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd5568e0-970f-4053-a407-8cd3070630b8-config-data\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.729404 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd5568e0-970f-4053-a407-8cd3070630b8-combined-ca-bundle\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.738244 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfnwq\" (UniqueName: \"kubernetes.io/projected/dd5568e0-970f-4053-a407-8cd3070630b8-kube-api-access-wfnwq\") pod \"cloudkitty-proc-0\" (UID: \"dd5568e0-970f-4053-a407-8cd3070630b8\") " pod="openstack/cloudkitty-proc-0" Dec 10 11:08:40 crc kubenswrapper[4682]: I1210 11:08:40.801025 4682 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/cloudkitty-proc-0" Dec 10 11:08:41 crc kubenswrapper[4682]: W1210 11:08:41.268391 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddd5568e0_970f_4053_a407_8cd3070630b8.slice/crio-a33b2853941331358a1031bd17595178ab442424d191a72898a9a33e8f0325ec WatchSource:0}: Error finding container a33b2853941331358a1031bd17595178ab442424d191a72898a9a33e8f0325ec: Status 404 returned error can't find the container with id a33b2853941331358a1031bd17595178ab442424d191a72898a9a33e8f0325ec Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.270614 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-proc-0"] Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.343968 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dthrv"] Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.349574 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.359161 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-proc-0" event={"ID":"dd5568e0-970f-4053-a407-8cd3070630b8","Type":"ContainerStarted","Data":"a33b2853941331358a1031bd17595178ab442424d191a72898a9a33e8f0325ec"} Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.359204 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.359504 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-kjf7r" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.359809 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.367710 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dthrv"] Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.427550 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-config-data\") pod \"nova-cell0-conductor-db-sync-dthrv\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.427873 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-scripts\") pod \"nova-cell0-conductor-db-sync-dthrv\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.427905 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dthrv\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.428163 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hppk9\" (UniqueName: 
\"kubernetes.io/projected/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-kube-api-access-hppk9\") pod \"nova-cell0-conductor-db-sync-dthrv\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.529635 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-scripts\") pod \"nova-cell0-conductor-db-sync-dthrv\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.529674 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dthrv\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.529806 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hppk9\" (UniqueName: \"kubernetes.io/projected/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-kube-api-access-hppk9\") pod \"nova-cell0-conductor-db-sync-dthrv\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.530013 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-config-data\") pod \"nova-cell0-conductor-db-sync-dthrv\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.534417 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-config-data\") pod \"nova-cell0-conductor-db-sync-dthrv\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.537253 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dthrv\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.537507 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-scripts\") pod \"nova-cell0-conductor-db-sync-dthrv\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.550376 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hppk9\" (UniqueName: \"kubernetes.io/projected/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-kube-api-access-hppk9\") pod \"nova-cell0-conductor-db-sync-dthrv\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:08:41 crc kubenswrapper[4682]: I1210 11:08:41.673524 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:08:42 crc kubenswrapper[4682]: I1210 11:08:42.173234 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dthrv"] Dec 10 11:08:42 crc kubenswrapper[4682]: I1210 11:08:42.374904 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dthrv" event={"ID":"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc","Type":"ContainerStarted","Data":"47b0d306535197d3d6ae3815015b9eedd0b53321a86029becf7519838351b1a0"} Dec 10 11:08:42 crc kubenswrapper[4682]: I1210 11:08:42.378709 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-proc-0" event={"ID":"dd5568e0-970f-4053-a407-8cd3070630b8","Type":"ContainerStarted","Data":"e8459f5e21359f3b0ccfdfc56db2fd146e2daa1c7de588df0a02daa0a7b552e1"} Dec 10 11:08:42 crc kubenswrapper[4682]: I1210 11:08:42.393581 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21cd26d9-3c93-42a2-b33a-c6c1a532806c" path="/var/lib/kubelet/pods/21cd26d9-3c93-42a2-b33a-c6c1a532806c/volumes" Dec 10 11:08:42 crc kubenswrapper[4682]: I1210 11:08:42.401340 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cloudkitty-proc-0" podStartSLOduration=2.401320966 podStartE2EDuration="2.401320966s" podCreationTimestamp="2025-12-10 11:08:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:08:42.398996626 +0000 UTC m=+1402.719207386" watchObservedRunningTime="2025-12-10 11:08:42.401320966 +0000 UTC m=+1402.721531726" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.444751 4682 generic.go:334] "Generic (PLEG): container finished" podID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerID="24eaad1d4616ad1ff9dcde6aed47b89b45f1fdc8c5d790c07786ea483e9243f2" exitCode=0 Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.445759 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1aa28b0-107b-411c-a7c1-9646565c49a9","Type":"ContainerDied","Data":"24eaad1d4616ad1ff9dcde6aed47b89b45f1fdc8c5d790c07786ea483e9243f2"} Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.534802 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.748216 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-combined-ca-bundle\") pod \"c1aa28b0-107b-411c-a7c1-9646565c49a9\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.748763 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1aa28b0-107b-411c-a7c1-9646565c49a9-run-httpd\") pod \"c1aa28b0-107b-411c-a7c1-9646565c49a9\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.748892 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1aa28b0-107b-411c-a7c1-9646565c49a9-log-httpd\") pod \"c1aa28b0-107b-411c-a7c1-9646565c49a9\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.748930 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-sg-core-conf-yaml\") pod \"c1aa28b0-107b-411c-a7c1-9646565c49a9\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.749006 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9t499\" (UniqueName: \"kubernetes.io/projected/c1aa28b0-107b-411c-a7c1-9646565c49a9-kube-api-access-9t499\") pod \"c1aa28b0-107b-411c-a7c1-9646565c49a9\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.749068 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-config-data\") pod \"c1aa28b0-107b-411c-a7c1-9646565c49a9\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.749092 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-scripts\") pod \"c1aa28b0-107b-411c-a7c1-9646565c49a9\" (UID: \"c1aa28b0-107b-411c-a7c1-9646565c49a9\") " Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.750840 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1aa28b0-107b-411c-a7c1-9646565c49a9-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c1aa28b0-107b-411c-a7c1-9646565c49a9" (UID: "c1aa28b0-107b-411c-a7c1-9646565c49a9"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.751935 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1aa28b0-107b-411c-a7c1-9646565c49a9-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c1aa28b0-107b-411c-a7c1-9646565c49a9" (UID: "c1aa28b0-107b-411c-a7c1-9646565c49a9"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.766659 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-scripts" (OuterVolumeSpecName: "scripts") pod "c1aa28b0-107b-411c-a7c1-9646565c49a9" (UID: "c1aa28b0-107b-411c-a7c1-9646565c49a9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.771228 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1aa28b0-107b-411c-a7c1-9646565c49a9-kube-api-access-9t499" (OuterVolumeSpecName: "kube-api-access-9t499") pod "c1aa28b0-107b-411c-a7c1-9646565c49a9" (UID: "c1aa28b0-107b-411c-a7c1-9646565c49a9"). InnerVolumeSpecName "kube-api-access-9t499". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.851337 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9t499\" (UniqueName: \"kubernetes.io/projected/c1aa28b0-107b-411c-a7c1-9646565c49a9-kube-api-access-9t499\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.851676 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.851757 4682 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1aa28b0-107b-411c-a7c1-9646565c49a9-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.851874 4682 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1aa28b0-107b-411c-a7c1-9646565c49a9-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.856676 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c1aa28b0-107b-411c-a7c1-9646565c49a9" (UID: "c1aa28b0-107b-411c-a7c1-9646565c49a9"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.932785 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1aa28b0-107b-411c-a7c1-9646565c49a9" (UID: "c1aa28b0-107b-411c-a7c1-9646565c49a9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.944725 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.944768 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.955256 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:43 crc kubenswrapper[4682]: I1210 11:08:43.955283 4682 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.012184 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.058632 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-config-data" (OuterVolumeSpecName: "config-data") pod "c1aa28b0-107b-411c-a7c1-9646565c49a9" (UID: "c1aa28b0-107b-411c-a7c1-9646565c49a9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.068024 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.161561 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1aa28b0-107b-411c-a7c1-9646565c49a9-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:08:44 crc kubenswrapper[4682]: E1210 11:08:44.321930 4682 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54a0138b_bd8c_4f9c_8858_7c8b41798e5e.slice/crio-conmon-744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54a0138b_bd8c_4f9c_8858_7c8b41798e5e.slice/crio-744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.451733 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.452674 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.498860 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1aa28b0-107b-411c-a7c1-9646565c49a9","Type":"ContainerDied","Data":"0b4e3fc6d6e7968a5e3cb3be8194bdbfd190c6e0694ce2b9e4dd5992aac8e131"} Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.498984 4682 scope.go:117] "RemoveContainer" containerID="71ede49c91ea4da56f1e35d6bd09f92cd93fbf24f95e1bfb115efe80386156fa" Dec 10 11:08:44 crc 
kubenswrapper[4682]: I1210 11:08:44.499986 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.500024 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.500561 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.500891 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.558517 4682 scope.go:117] "RemoveContainer" containerID="d82817bf6c16ddef7569a14aa92e037db71d3d091f03fbcbd9811551a183d788" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.577584 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.580452 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.589975 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.606214 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:44 crc kubenswrapper[4682]: E1210 11:08:44.606796 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="sg-core" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.606820 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="sg-core" Dec 10 11:08:44 crc kubenswrapper[4682]: E1210 11:08:44.606841 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="ceilometer-notification-agent" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.606848 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="ceilometer-notification-agent" Dec 10 11:08:44 crc kubenswrapper[4682]: E1210 11:08:44.606875 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="proxy-httpd" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.606883 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="proxy-httpd" Dec 10 11:08:44 crc kubenswrapper[4682]: E1210 11:08:44.606906 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="ceilometer-central-agent" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.606913 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="ceilometer-central-agent" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.607131 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="ceilometer-notification-agent" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.607154 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="proxy-httpd" Dec 10 11:08:44 crc 
kubenswrapper[4682]: I1210 11:08:44.607169 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="ceilometer-central-agent" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.607181 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" containerName="sg-core" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.609127 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.611870 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.620544 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.628747 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.643112 4682 scope.go:117] "RemoveContainer" containerID="e2e01911ba9821d3565cdcea313ef52472a4bb4cbc1e4772548dcb6a1eba20b4" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.674564 4682 scope.go:117] "RemoveContainer" containerID="24eaad1d4616ad1ff9dcde6aed47b89b45f1fdc8c5d790c07786ea483e9243f2" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.778797 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a059361-99f8-4d9e-869c-e0a0e68982c5-run-httpd\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.778938 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.779052 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a059361-99f8-4d9e-869c-e0a0e68982c5-log-httpd\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.779091 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-config-data\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.779385 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.779783 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-scripts\") pod \"ceilometer-0\" (UID: 
\"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.779864 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qt8n\" (UniqueName: \"kubernetes.io/projected/9a059361-99f8-4d9e-869c-e0a0e68982c5-kube-api-access-6qt8n\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.882241 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-scripts\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.882288 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qt8n\" (UniqueName: \"kubernetes.io/projected/9a059361-99f8-4d9e-869c-e0a0e68982c5-kube-api-access-6qt8n\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.882338 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a059361-99f8-4d9e-869c-e0a0e68982c5-run-httpd\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.882384 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.882406 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a059361-99f8-4d9e-869c-e0a0e68982c5-log-httpd\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.882436 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-config-data\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.882475 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.883156 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a059361-99f8-4d9e-869c-e0a0e68982c5-log-httpd\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.883835 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a059361-99f8-4d9e-869c-e0a0e68982c5-run-httpd\") pod \"ceilometer-0\" (UID: 
\"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.888609 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-scripts\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.896173 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.896333 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.897361 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-config-data\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.900805 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qt8n\" (UniqueName: \"kubernetes.io/projected/9a059361-99f8-4d9e-869c-e0a0e68982c5-kube-api-access-6qt8n\") pod \"ceilometer-0\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " pod="openstack/ceilometer-0" Dec 10 11:08:44 crc kubenswrapper[4682]: I1210 11:08:44.942751 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:08:45 crc kubenswrapper[4682]: I1210 11:08:45.548748 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 10 11:08:45 crc kubenswrapper[4682]: I1210 11:08:45.560576 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 10 11:08:45 crc kubenswrapper[4682]: I1210 11:08:45.585251 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:46 crc kubenswrapper[4682]: I1210 11:08:46.394751 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1aa28b0-107b-411c-a7c1-9646565c49a9" path="/var/lib/kubelet/pods/c1aa28b0-107b-411c-a7c1-9646565c49a9/volumes" Dec 10 11:08:46 crc kubenswrapper[4682]: I1210 11:08:46.560977 4682 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 11:08:46 crc kubenswrapper[4682]: I1210 11:08:46.561014 4682 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 11:08:46 crc kubenswrapper[4682]: I1210 11:08:46.561233 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a059361-99f8-4d9e-869c-e0a0e68982c5","Type":"ContainerStarted","Data":"46873ecd8c50d036209a8b856c9cd2c6b4fc2617170a1962f9821d062b1c7907"} Dec 10 11:08:47 crc kubenswrapper[4682]: I1210 11:08:47.135947 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 10 11:08:47 crc kubenswrapper[4682]: I1210 11:08:47.137097 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 10 11:08:47 crc kubenswrapper[4682]: I1210 11:08:47.573987 4682 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 11:08:47 crc kubenswrapper[4682]: I1210 11:08:47.576392 4682 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 11:08:48 crc kubenswrapper[4682]: I1210 11:08:48.464726 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 10 11:08:48 crc kubenswrapper[4682]: I1210 11:08:48.599021 4682 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 11:08:48 crc kubenswrapper[4682]: I1210 11:08:48.600204 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a059361-99f8-4d9e-869c-e0a0e68982c5","Type":"ContainerStarted","Data":"102d3629ff24187e6e631a8658cf3382361e9a9e938769546dc62a3447f73577"} Dec 10 11:08:49 crc kubenswrapper[4682]: I1210 11:08:49.640679 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a059361-99f8-4d9e-869c-e0a0e68982c5","Type":"ContainerStarted","Data":"9b63c91a0efbeadad6dc66a39acde72c6de31e3b9ac85f44a7e081895c0213ea"} Dec 10 11:08:49 crc kubenswrapper[4682]: I1210 11:08:49.654302 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 10 11:08:50 crc kubenswrapper[4682]: I1210 11:08:50.788430 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cloudkitty-api-0" Dec 10 11:08:54 crc kubenswrapper[4682]: E1210 11:08:54.676097 4682 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54a0138b_bd8c_4f9c_8858_7c8b41798e5e.slice/crio-744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54a0138b_bd8c_4f9c_8858_7c8b41798e5e.slice/crio-conmon-744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:08:55 crc kubenswrapper[4682]: I1210 11:08:55.491047 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:08:56 crc kubenswrapper[4682]: I1210 11:08:56.509268 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-g948t"] Dec 10 11:08:56 crc kubenswrapper[4682]: I1210 11:08:56.512249 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:08:56 crc kubenswrapper[4682]: I1210 11:08:56.528834 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g948t"] Dec 10 11:08:56 crc kubenswrapper[4682]: I1210 11:08:56.701822 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/463b20fe-b09f-48e1-9736-6663d0af3c81-utilities\") pod \"redhat-operators-g948t\" (UID: \"463b20fe-b09f-48e1-9736-6663d0af3c81\") " pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:08:56 crc kubenswrapper[4682]: I1210 11:08:56.701884 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dhs9\" (UniqueName: \"kubernetes.io/projected/463b20fe-b09f-48e1-9736-6663d0af3c81-kube-api-access-9dhs9\") pod \"redhat-operators-g948t\" (UID: \"463b20fe-b09f-48e1-9736-6663d0af3c81\") " pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:08:56 crc kubenswrapper[4682]: I1210 11:08:56.701910 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/463b20fe-b09f-48e1-9736-6663d0af3c81-catalog-content\") pod \"redhat-operators-g948t\" (UID: \"463b20fe-b09f-48e1-9736-6663d0af3c81\") " pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:08:56 crc kubenswrapper[4682]: I1210 11:08:56.804074 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/463b20fe-b09f-48e1-9736-6663d0af3c81-utilities\") pod \"redhat-operators-g948t\" (UID: \"463b20fe-b09f-48e1-9736-6663d0af3c81\") " pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:08:56 crc kubenswrapper[4682]: I1210 11:08:56.804125 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dhs9\" (UniqueName: \"kubernetes.io/projected/463b20fe-b09f-48e1-9736-6663d0af3c81-kube-api-access-9dhs9\") pod \"redhat-operators-g948t\" (UID: \"463b20fe-b09f-48e1-9736-6663d0af3c81\") " pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:08:56 crc kubenswrapper[4682]: I1210 11:08:56.804169 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/463b20fe-b09f-48e1-9736-6663d0af3c81-catalog-content\") pod \"redhat-operators-g948t\" (UID: \"463b20fe-b09f-48e1-9736-6663d0af3c81\") " 
pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:08:56 crc kubenswrapper[4682]: I1210 11:08:56.804641 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/463b20fe-b09f-48e1-9736-6663d0af3c81-utilities\") pod \"redhat-operators-g948t\" (UID: \"463b20fe-b09f-48e1-9736-6663d0af3c81\") " pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:08:56 crc kubenswrapper[4682]: I1210 11:08:56.805036 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/463b20fe-b09f-48e1-9736-6663d0af3c81-catalog-content\") pod \"redhat-operators-g948t\" (UID: \"463b20fe-b09f-48e1-9736-6663d0af3c81\") " pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:08:56 crc kubenswrapper[4682]: I1210 11:08:56.831882 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dhs9\" (UniqueName: \"kubernetes.io/projected/463b20fe-b09f-48e1-9736-6663d0af3c81-kube-api-access-9dhs9\") pod \"redhat-operators-g948t\" (UID: \"463b20fe-b09f-48e1-9736-6663d0af3c81\") " pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:08:56 crc kubenswrapper[4682]: I1210 11:08:56.835174 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:08:58 crc kubenswrapper[4682]: W1210 11:08:58.793862 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod463b20fe_b09f_48e1_9736_6663d0af3c81.slice/crio-8494681d65579c94fcae43a9e44a5ce1c550b5b35df3a575ee716ba7118c8497 WatchSource:0}: Error finding container 8494681d65579c94fcae43a9e44a5ce1c550b5b35df3a575ee716ba7118c8497: Status 404 returned error can't find the container with id 8494681d65579c94fcae43a9e44a5ce1c550b5b35df3a575ee716ba7118c8497 Dec 10 11:08:58 crc kubenswrapper[4682]: I1210 11:08:58.801918 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g948t"] Dec 10 11:08:58 crc kubenswrapper[4682]: I1210 11:08:58.830719 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g948t" event={"ID":"463b20fe-b09f-48e1-9736-6663d0af3c81","Type":"ContainerStarted","Data":"8494681d65579c94fcae43a9e44a5ce1c550b5b35df3a575ee716ba7118c8497"} Dec 10 11:08:58 crc kubenswrapper[4682]: I1210 11:08:58.836257 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a059361-99f8-4d9e-869c-e0a0e68982c5","Type":"ContainerStarted","Data":"0a13bfc63e61d677a5bc3c4ae186050720668981d523e4b4389dbf8d1b4791c6"} Dec 10 11:08:59 crc kubenswrapper[4682]: I1210 11:08:59.847631 4682 generic.go:334] "Generic (PLEG): container finished" podID="463b20fe-b09f-48e1-9736-6663d0af3c81" containerID="2e22b52066194fb7e88d4a6e976ace5a0d535ed7a48439acb9d4b0e6f2868b52" exitCode=0 Dec 10 11:08:59 crc kubenswrapper[4682]: I1210 11:08:59.847719 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g948t" event={"ID":"463b20fe-b09f-48e1-9736-6663d0af3c81","Type":"ContainerDied","Data":"2e22b52066194fb7e88d4a6e976ace5a0d535ed7a48439acb9d4b0e6f2868b52"} Dec 10 11:08:59 crc kubenswrapper[4682]: I1210 11:08:59.849339 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dthrv" 
event={"ID":"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc","Type":"ContainerStarted","Data":"c7977e71889f282b1f68b73051bd112c808ba3f97eb860dcc06afbb49a124e58"} Dec 10 11:08:59 crc kubenswrapper[4682]: I1210 11:08:59.892198 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-dthrv" podStartSLOduration=2.625883362 podStartE2EDuration="18.892173568s" podCreationTimestamp="2025-12-10 11:08:41 +0000 UTC" firstStartedPulling="2025-12-10 11:08:42.173771917 +0000 UTC m=+1402.493982667" lastFinishedPulling="2025-12-10 11:08:58.440062123 +0000 UTC m=+1418.760272873" observedRunningTime="2025-12-10 11:08:59.8892379 +0000 UTC m=+1420.209448650" watchObservedRunningTime="2025-12-10 11:08:59.892173568 +0000 UTC m=+1420.212384318" Dec 10 11:09:02 crc kubenswrapper[4682]: I1210 11:09:02.880697 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g948t" event={"ID":"463b20fe-b09f-48e1-9736-6663d0af3c81","Type":"ContainerStarted","Data":"ca4060b0ca4fb1d985afdc99ebb37c7e71366b4d42cb8cda58c58a9b83f6c054"} Dec 10 11:09:02 crc kubenswrapper[4682]: I1210 11:09:02.883291 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a059361-99f8-4d9e-869c-e0a0e68982c5","Type":"ContainerStarted","Data":"8b0c197d0db2d3b04640d853db3453c75c73c9e0f73a7017a3f95be77d2f0899"} Dec 10 11:09:02 crc kubenswrapper[4682]: I1210 11:09:02.883523 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:09:02 crc kubenswrapper[4682]: I1210 11:09:02.883571 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="proxy-httpd" containerID="cri-o://8b0c197d0db2d3b04640d853db3453c75c73c9e0f73a7017a3f95be77d2f0899" gracePeriod=30 Dec 10 11:09:02 crc kubenswrapper[4682]: I1210 11:09:02.883618 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="sg-core" containerID="cri-o://0a13bfc63e61d677a5bc3c4ae186050720668981d523e4b4389dbf8d1b4791c6" gracePeriod=30 Dec 10 11:09:02 crc kubenswrapper[4682]: I1210 11:09:02.883675 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="ceilometer-notification-agent" containerID="cri-o://9b63c91a0efbeadad6dc66a39acde72c6de31e3b9ac85f44a7e081895c0213ea" gracePeriod=30 Dec 10 11:09:02 crc kubenswrapper[4682]: I1210 11:09:02.883741 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="ceilometer-central-agent" containerID="cri-o://102d3629ff24187e6e631a8658cf3382361e9a9e938769546dc62a3447f73577" gracePeriod=30 Dec 10 11:09:02 crc kubenswrapper[4682]: I1210 11:09:02.931934 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.82278977 podStartE2EDuration="18.931912293s" podCreationTimestamp="2025-12-10 11:08:44 +0000 UTC" firstStartedPulling="2025-12-10 11:08:45.538838348 +0000 UTC m=+1405.859049098" lastFinishedPulling="2025-12-10 11:09:01.647960871 +0000 UTC m=+1421.968171621" observedRunningTime="2025-12-10 11:09:02.927222023 +0000 UTC m=+1423.247432793" watchObservedRunningTime="2025-12-10 11:09:02.931912293 +0000 UTC 
m=+1423.252123043" Dec 10 11:09:03 crc kubenswrapper[4682]: I1210 11:09:03.898625 4682 generic.go:334] "Generic (PLEG): container finished" podID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerID="8b0c197d0db2d3b04640d853db3453c75c73c9e0f73a7017a3f95be77d2f0899" exitCode=0 Dec 10 11:09:03 crc kubenswrapper[4682]: I1210 11:09:03.899463 4682 generic.go:334] "Generic (PLEG): container finished" podID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerID="0a13bfc63e61d677a5bc3c4ae186050720668981d523e4b4389dbf8d1b4791c6" exitCode=2 Dec 10 11:09:03 crc kubenswrapper[4682]: I1210 11:09:03.899597 4682 generic.go:334] "Generic (PLEG): container finished" podID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerID="102d3629ff24187e6e631a8658cf3382361e9a9e938769546dc62a3447f73577" exitCode=0 Dec 10 11:09:03 crc kubenswrapper[4682]: I1210 11:09:03.898724 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a059361-99f8-4d9e-869c-e0a0e68982c5","Type":"ContainerDied","Data":"8b0c197d0db2d3b04640d853db3453c75c73c9e0f73a7017a3f95be77d2f0899"} Dec 10 11:09:03 crc kubenswrapper[4682]: I1210 11:09:03.899784 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a059361-99f8-4d9e-869c-e0a0e68982c5","Type":"ContainerDied","Data":"0a13bfc63e61d677a5bc3c4ae186050720668981d523e4b4389dbf8d1b4791c6"} Dec 10 11:09:03 crc kubenswrapper[4682]: I1210 11:09:03.899803 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a059361-99f8-4d9e-869c-e0a0e68982c5","Type":"ContainerDied","Data":"102d3629ff24187e6e631a8658cf3382361e9a9e938769546dc62a3447f73577"} Dec 10 11:09:04 crc kubenswrapper[4682]: I1210 11:09:04.928648 4682 generic.go:334] "Generic (PLEG): container finished" podID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerID="9b63c91a0efbeadad6dc66a39acde72c6de31e3b9ac85f44a7e081895c0213ea" exitCode=0 Dec 10 11:09:04 crc kubenswrapper[4682]: I1210 11:09:04.928767 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a059361-99f8-4d9e-869c-e0a0e68982c5","Type":"ContainerDied","Data":"9b63c91a0efbeadad6dc66a39acde72c6de31e3b9ac85f44a7e081895c0213ea"} Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.023541 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:09:05 crc kubenswrapper[4682]: E1210 11:09:05.071369 4682 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54a0138b_bd8c_4f9c_8858_7c8b41798e5e.slice/crio-conmon-744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54a0138b_bd8c_4f9c_8858_7c8b41798e5e.slice/crio-744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.190889 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-combined-ca-bundle\") pod \"9a059361-99f8-4d9e-869c-e0a0e68982c5\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.191170 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a059361-99f8-4d9e-869c-e0a0e68982c5-log-httpd\") pod \"9a059361-99f8-4d9e-869c-e0a0e68982c5\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.191204 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-sg-core-conf-yaml\") pod \"9a059361-99f8-4d9e-869c-e0a0e68982c5\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.191259 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-config-data\") pod \"9a059361-99f8-4d9e-869c-e0a0e68982c5\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.191353 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6qt8n\" (UniqueName: \"kubernetes.io/projected/9a059361-99f8-4d9e-869c-e0a0e68982c5-kube-api-access-6qt8n\") pod \"9a059361-99f8-4d9e-869c-e0a0e68982c5\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.191530 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-scripts\") pod \"9a059361-99f8-4d9e-869c-e0a0e68982c5\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.191590 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a059361-99f8-4d9e-869c-e0a0e68982c5-run-httpd\") pod \"9a059361-99f8-4d9e-869c-e0a0e68982c5\" (UID: \"9a059361-99f8-4d9e-869c-e0a0e68982c5\") " Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.191981 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a059361-99f8-4d9e-869c-e0a0e68982c5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9a059361-99f8-4d9e-869c-e0a0e68982c5" (UID: "9a059361-99f8-4d9e-869c-e0a0e68982c5"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.192051 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a059361-99f8-4d9e-869c-e0a0e68982c5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9a059361-99f8-4d9e-869c-e0a0e68982c5" (UID: "9a059361-99f8-4d9e-869c-e0a0e68982c5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.192208 4682 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a059361-99f8-4d9e-869c-e0a0e68982c5-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.192226 4682 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a059361-99f8-4d9e-869c-e0a0e68982c5-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.196917 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-scripts" (OuterVolumeSpecName: "scripts") pod "9a059361-99f8-4d9e-869c-e0a0e68982c5" (UID: "9a059361-99f8-4d9e-869c-e0a0e68982c5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.201099 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a059361-99f8-4d9e-869c-e0a0e68982c5-kube-api-access-6qt8n" (OuterVolumeSpecName: "kube-api-access-6qt8n") pod "9a059361-99f8-4d9e-869c-e0a0e68982c5" (UID: "9a059361-99f8-4d9e-869c-e0a0e68982c5"). InnerVolumeSpecName "kube-api-access-6qt8n". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.228773 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9a059361-99f8-4d9e-869c-e0a0e68982c5" (UID: "9a059361-99f8-4d9e-869c-e0a0e68982c5"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.332872 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6qt8n\" (UniqueName: \"kubernetes.io/projected/9a059361-99f8-4d9e-869c-e0a0e68982c5-kube-api-access-6qt8n\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.332916 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.332929 4682 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.402999 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9a059361-99f8-4d9e-869c-e0a0e68982c5" (UID: "9a059361-99f8-4d9e-869c-e0a0e68982c5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.425198 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-config-data" (OuterVolumeSpecName: "config-data") pod "9a059361-99f8-4d9e-869c-e0a0e68982c5" (UID: "9a059361-99f8-4d9e-869c-e0a0e68982c5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.437207 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.437522 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a059361-99f8-4d9e-869c-e0a0e68982c5-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.950541 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a059361-99f8-4d9e-869c-e0a0e68982c5","Type":"ContainerDied","Data":"46873ecd8c50d036209a8b856c9cd2c6b4fc2617170a1962f9821d062b1c7907"} Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.950825 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.950873 4682 scope.go:117] "RemoveContainer" containerID="8b0c197d0db2d3b04640d853db3453c75c73c9e0f73a7017a3f95be77d2f0899" Dec 10 11:09:05 crc kubenswrapper[4682]: I1210 11:09:05.977365 4682 scope.go:117] "RemoveContainer" containerID="0a13bfc63e61d677a5bc3c4ae186050720668981d523e4b4389dbf8d1b4791c6" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.023662 4682 scope.go:117] "RemoveContainer" containerID="9b63c91a0efbeadad6dc66a39acde72c6de31e3b9ac85f44a7e081895c0213ea" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.025572 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.052577 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.066512 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:09:06 crc kubenswrapper[4682]: E1210 11:09:06.066963 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="ceilometer-central-agent" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.066982 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="ceilometer-central-agent" Dec 10 11:09:06 crc kubenswrapper[4682]: E1210 11:09:06.066991 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="ceilometer-notification-agent" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.067000 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="ceilometer-notification-agent" Dec 10 11:09:06 crc kubenswrapper[4682]: E1210 11:09:06.067022 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="sg-core" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 
11:09:06.067029 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="sg-core" Dec 10 11:09:06 crc kubenswrapper[4682]: E1210 11:09:06.067041 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="proxy-httpd" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.067046 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="proxy-httpd" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.067276 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="ceilometer-notification-agent" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.067302 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="sg-core" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.067308 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="proxy-httpd" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.067314 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" containerName="ceilometer-central-agent" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.069556 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.073199 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.073409 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.079179 4682 scope.go:117] "RemoveContainer" containerID="102d3629ff24187e6e631a8658cf3382361e9a9e938769546dc62a3447f73577" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.100505 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.221510 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b1976646-8c73-4c29-a9e4-71527b1f3f61-run-httpd\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.221631 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-config-data\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.221664 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.221782 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.221828 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-scripts\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.221846 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vk29f\" (UniqueName: \"kubernetes.io/projected/b1976646-8c73-4c29-a9e4-71527b1f3f61-kube-api-access-vk29f\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.221969 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b1976646-8c73-4c29-a9e4-71527b1f3f61-log-httpd\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.323231 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b1976646-8c73-4c29-a9e4-71527b1f3f61-log-httpd\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.323383 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b1976646-8c73-4c29-a9e4-71527b1f3f61-run-httpd\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.323409 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-config-data\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.323447 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.323538 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.323576 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-scripts\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.324452 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-vk29f\" (UniqueName: \"kubernetes.io/projected/b1976646-8c73-4c29-a9e4-71527b1f3f61-kube-api-access-vk29f\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.324245 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b1976646-8c73-4c29-a9e4-71527b1f3f61-run-httpd\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.324638 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b1976646-8c73-4c29-a9e4-71527b1f3f61-log-httpd\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.329123 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.329322 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-scripts\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.341037 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.343937 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vk29f\" (UniqueName: \"kubernetes.io/projected/b1976646-8c73-4c29-a9e4-71527b1f3f61-kube-api-access-vk29f\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.344607 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-config-data\") pod \"ceilometer-0\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.389211 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.394824 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a059361-99f8-4d9e-869c-e0a0e68982c5" path="/var/lib/kubelet/pods/9a059361-99f8-4d9e-869c-e0a0e68982c5/volumes" Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.876510 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:09:06 crc kubenswrapper[4682]: I1210 11:09:06.963422 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b1976646-8c73-4c29-a9e4-71527b1f3f61","Type":"ContainerStarted","Data":"9d3039ba65e3037243c91bbfc9e42822987082774ab62028304f586c47325a1a"} Dec 10 11:09:08 crc kubenswrapper[4682]: I1210 11:09:08.987425 4682 generic.go:334] "Generic (PLEG): container finished" podID="463b20fe-b09f-48e1-9736-6663d0af3c81" containerID="ca4060b0ca4fb1d985afdc99ebb37c7e71366b4d42cb8cda58c58a9b83f6c054" exitCode=0 Dec 10 11:09:08 crc kubenswrapper[4682]: I1210 11:09:08.987598 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g948t" event={"ID":"463b20fe-b09f-48e1-9736-6663d0af3c81","Type":"ContainerDied","Data":"ca4060b0ca4fb1d985afdc99ebb37c7e71366b4d42cb8cda58c58a9b83f6c054"} Dec 10 11:09:11 crc kubenswrapper[4682]: I1210 11:09:11.020979 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b1976646-8c73-4c29-a9e4-71527b1f3f61","Type":"ContainerStarted","Data":"1c2fbb0868dda0fd77e32030ce0cb234929b1c619ebada2abcfd8e688c116d92"} Dec 10 11:09:11 crc kubenswrapper[4682]: I1210 11:09:11.024380 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g948t" event={"ID":"463b20fe-b09f-48e1-9736-6663d0af3c81","Type":"ContainerStarted","Data":"64f9cae09c4663cfe2a419b31200bc0658dfeaaf208bcc7ba0acbca3d97faac4"} Dec 10 11:09:11 crc kubenswrapper[4682]: I1210 11:09:11.068428 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-g948t" podStartSLOduration=4.653560693 podStartE2EDuration="15.068403532s" podCreationTimestamp="2025-12-10 11:08:56 +0000 UTC" firstStartedPulling="2025-12-10 11:08:59.85078866 +0000 UTC m=+1420.170999410" lastFinishedPulling="2025-12-10 11:09:10.265631499 +0000 UTC m=+1430.585842249" observedRunningTime="2025-12-10 11:09:11.054505386 +0000 UTC m=+1431.374716146" watchObservedRunningTime="2025-12-10 11:09:11.068403532 +0000 UTC m=+1431.388614292" Dec 10 11:09:13 crc kubenswrapper[4682]: I1210 11:09:13.049608 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b1976646-8c73-4c29-a9e4-71527b1f3f61","Type":"ContainerStarted","Data":"39a8adcd86c4e1fafeb0ddb1bb079e59d4aa7b03fcbb8713b52ed781fbc972b8"} Dec 10 11:09:15 crc kubenswrapper[4682]: E1210 11:09:15.327175 4682 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54a0138b_bd8c_4f9c_8858_7c8b41798e5e.slice/crio-conmon-744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54a0138b_bd8c_4f9c_8858_7c8b41798e5e.slice/crio-744ce1256a42c0c3324f81fad7ba0400804802aa47396441f722f7efbcaf7a69.scope\": RecentStats: unable to find data in memory cache]" Dec 10 11:09:16 
crc kubenswrapper[4682]: I1210 11:09:16.085007 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b1976646-8c73-4c29-a9e4-71527b1f3f61","Type":"ContainerStarted","Data":"6e0db9038a6a4e2eb441dd41c29f91f24298f044771f61aa94da79aab5d082a0"} Dec 10 11:09:16 crc kubenswrapper[4682]: I1210 11:09:16.835898 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:09:16 crc kubenswrapper[4682]: I1210 11:09:16.836242 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:09:17 crc kubenswrapper[4682]: I1210 11:09:17.101649 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b1976646-8c73-4c29-a9e4-71527b1f3f61","Type":"ContainerStarted","Data":"b35e83a658e2fd48c52ad9f456b48123f74e19434e28eb7e2f573e359da9dd28"} Dec 10 11:09:17 crc kubenswrapper[4682]: I1210 11:09:17.101821 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:09:17 crc kubenswrapper[4682]: I1210 11:09:17.135684 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.769316336 podStartE2EDuration="12.135663298s" podCreationTimestamp="2025-12-10 11:09:05 +0000 UTC" firstStartedPulling="2025-12-10 11:09:06.877657642 +0000 UTC m=+1427.197868392" lastFinishedPulling="2025-12-10 11:09:16.244004604 +0000 UTC m=+1436.564215354" observedRunningTime="2025-12-10 11:09:17.125940487 +0000 UTC m=+1437.446151237" watchObservedRunningTime="2025-12-10 11:09:17.135663298 +0000 UTC m=+1437.455874048" Dec 10 11:09:17 crc kubenswrapper[4682]: I1210 11:09:17.884049 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-g948t" podUID="463b20fe-b09f-48e1-9736-6663d0af3c81" containerName="registry-server" probeResult="failure" output=< Dec 10 11:09:17 crc kubenswrapper[4682]: timeout: failed to connect service ":50051" within 1s Dec 10 11:09:17 crc kubenswrapper[4682]: > Dec 10 11:09:23 crc kubenswrapper[4682]: I1210 11:09:23.157312 4682 generic.go:334] "Generic (PLEG): container finished" podID="55ad637c-32c9-421a-a8b7-ffe9cc9eebdc" containerID="c7977e71889f282b1f68b73051bd112c808ba3f97eb860dcc06afbb49a124e58" exitCode=0 Dec 10 11:09:23 crc kubenswrapper[4682]: I1210 11:09:23.157428 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dthrv" event={"ID":"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc","Type":"ContainerDied","Data":"c7977e71889f282b1f68b73051bd112c808ba3f97eb860dcc06afbb49a124e58"} Dec 10 11:09:24 crc kubenswrapper[4682]: I1210 11:09:24.626349 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:09:24 crc kubenswrapper[4682]: I1210 11:09:24.652044 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hppk9\" (UniqueName: \"kubernetes.io/projected/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-kube-api-access-hppk9\") pod \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " Dec 10 11:09:24 crc kubenswrapper[4682]: I1210 11:09:24.652229 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-scripts\") pod \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " Dec 10 11:09:24 crc kubenswrapper[4682]: I1210 11:09:24.652301 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-combined-ca-bundle\") pod \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " Dec 10 11:09:24 crc kubenswrapper[4682]: I1210 11:09:24.652389 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-config-data\") pod \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\" (UID: \"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc\") " Dec 10 11:09:24 crc kubenswrapper[4682]: I1210 11:09:24.677317 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-scripts" (OuterVolumeSpecName: "scripts") pod "55ad637c-32c9-421a-a8b7-ffe9cc9eebdc" (UID: "55ad637c-32c9-421a-a8b7-ffe9cc9eebdc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:24 crc kubenswrapper[4682]: I1210 11:09:24.682245 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55ad637c-32c9-421a-a8b7-ffe9cc9eebdc" (UID: "55ad637c-32c9-421a-a8b7-ffe9cc9eebdc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:24 crc kubenswrapper[4682]: I1210 11:09:24.687742 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-kube-api-access-hppk9" (OuterVolumeSpecName: "kube-api-access-hppk9") pod "55ad637c-32c9-421a-a8b7-ffe9cc9eebdc" (UID: "55ad637c-32c9-421a-a8b7-ffe9cc9eebdc"). InnerVolumeSpecName "kube-api-access-hppk9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:09:24 crc kubenswrapper[4682]: I1210 11:09:24.695024 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-config-data" (OuterVolumeSpecName: "config-data") pod "55ad637c-32c9-421a-a8b7-ffe9cc9eebdc" (UID: "55ad637c-32c9-421a-a8b7-ffe9cc9eebdc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:24 crc kubenswrapper[4682]: I1210 11:09:24.754483 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hppk9\" (UniqueName: \"kubernetes.io/projected/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-kube-api-access-hppk9\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:24 crc kubenswrapper[4682]: I1210 11:09:24.754527 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:24 crc kubenswrapper[4682]: I1210 11:09:24.754541 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:24 crc kubenswrapper[4682]: I1210 11:09:24.754551 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.178628 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dthrv" event={"ID":"55ad637c-32c9-421a-a8b7-ffe9cc9eebdc","Type":"ContainerDied","Data":"47b0d306535197d3d6ae3815015b9eedd0b53321a86029becf7519838351b1a0"} Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.178670 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47b0d306535197d3d6ae3815015b9eedd0b53321a86029becf7519838351b1a0" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.178694 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dthrv" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.382440 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 10 11:09:25 crc kubenswrapper[4682]: E1210 11:09:25.383180 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55ad637c-32c9-421a-a8b7-ffe9cc9eebdc" containerName="nova-cell0-conductor-db-sync" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.383205 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="55ad637c-32c9-421a-a8b7-ffe9cc9eebdc" containerName="nova-cell0-conductor-db-sync" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.383570 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="55ad637c-32c9-421a-a8b7-ffe9cc9eebdc" containerName="nova-cell0-conductor-db-sync" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.384438 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.401152 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-kjf7r" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.414902 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.424156 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.471883 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bf01d55-c09a-4228-8a66-40d1a9f12e0d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6bf01d55-c09a-4228-8a66-40d1a9f12e0d\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.472055 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qw4qz\" (UniqueName: \"kubernetes.io/projected/6bf01d55-c09a-4228-8a66-40d1a9f12e0d-kube-api-access-qw4qz\") pod \"nova-cell0-conductor-0\" (UID: \"6bf01d55-c09a-4228-8a66-40d1a9f12e0d\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.472172 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bf01d55-c09a-4228-8a66-40d1a9f12e0d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"6bf01d55-c09a-4228-8a66-40d1a9f12e0d\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.573818 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qw4qz\" (UniqueName: \"kubernetes.io/projected/6bf01d55-c09a-4228-8a66-40d1a9f12e0d-kube-api-access-qw4qz\") pod \"nova-cell0-conductor-0\" (UID: \"6bf01d55-c09a-4228-8a66-40d1a9f12e0d\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.573919 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bf01d55-c09a-4228-8a66-40d1a9f12e0d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"6bf01d55-c09a-4228-8a66-40d1a9f12e0d\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.574005 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bf01d55-c09a-4228-8a66-40d1a9f12e0d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6bf01d55-c09a-4228-8a66-40d1a9f12e0d\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.578638 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bf01d55-c09a-4228-8a66-40d1a9f12e0d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6bf01d55-c09a-4228-8a66-40d1a9f12e0d\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.590638 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bf01d55-c09a-4228-8a66-40d1a9f12e0d-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"6bf01d55-c09a-4228-8a66-40d1a9f12e0d\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.591199 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qw4qz\" (UniqueName: \"kubernetes.io/projected/6bf01d55-c09a-4228-8a66-40d1a9f12e0d-kube-api-access-qw4qz\") pod \"nova-cell0-conductor-0\" (UID: \"6bf01d55-c09a-4228-8a66-40d1a9f12e0d\") " pod="openstack/nova-cell0-conductor-0" Dec 10 11:09:25 crc kubenswrapper[4682]: I1210 11:09:25.705043 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 10 11:09:26 crc kubenswrapper[4682]: W1210 11:09:26.157386 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6bf01d55_c09a_4228_8a66_40d1a9f12e0d.slice/crio-44b40dd4da4572c99e1185075ea3803663ee621d75263771bd871f76d4209adf WatchSource:0}: Error finding container 44b40dd4da4572c99e1185075ea3803663ee621d75263771bd871f76d4209adf: Status 404 returned error can't find the container with id 44b40dd4da4572c99e1185075ea3803663ee621d75263771bd871f76d4209adf Dec 10 11:09:26 crc kubenswrapper[4682]: I1210 11:09:26.171884 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 10 11:09:26 crc kubenswrapper[4682]: I1210 11:09:26.196499 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"6bf01d55-c09a-4228-8a66-40d1a9f12e0d","Type":"ContainerStarted","Data":"44b40dd4da4572c99e1185075ea3803663ee621d75263771bd871f76d4209adf"} Dec 10 11:09:26 crc kubenswrapper[4682]: I1210 11:09:26.922593 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:09:26 crc kubenswrapper[4682]: I1210 11:09:26.994240 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:09:27 crc kubenswrapper[4682]: I1210 11:09:27.207563 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"6bf01d55-c09a-4228-8a66-40d1a9f12e0d","Type":"ContainerStarted","Data":"3ea29f327792b64f7fde01ec5aec5ff2e16d511e000ddd6c7d2395926b770b1c"} Dec 10 11:09:27 crc kubenswrapper[4682]: I1210 11:09:27.231753 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.231721827 podStartE2EDuration="2.231721827s" podCreationTimestamp="2025-12-10 11:09:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:09:27.220926614 +0000 UTC m=+1447.541137374" watchObservedRunningTime="2025-12-10 11:09:27.231721827 +0000 UTC m=+1447.551932617" Dec 10 11:09:27 crc kubenswrapper[4682]: I1210 11:09:27.702066 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-g948t"] Dec 10 11:09:28 crc kubenswrapper[4682]: I1210 11:09:28.220516 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Dec 10 11:09:28 crc kubenswrapper[4682]: I1210 11:09:28.220722 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-g948t" podUID="463b20fe-b09f-48e1-9736-6663d0af3c81" containerName="registry-server" 
containerID="cri-o://64f9cae09c4663cfe2a419b31200bc0658dfeaaf208bcc7ba0acbca3d97faac4" gracePeriod=2 Dec 10 11:09:28 crc kubenswrapper[4682]: I1210 11:09:28.808722 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:09:28 crc kubenswrapper[4682]: I1210 11:09:28.839424 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/463b20fe-b09f-48e1-9736-6663d0af3c81-utilities\") pod \"463b20fe-b09f-48e1-9736-6663d0af3c81\" (UID: \"463b20fe-b09f-48e1-9736-6663d0af3c81\") " Dec 10 11:09:28 crc kubenswrapper[4682]: I1210 11:09:28.839464 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/463b20fe-b09f-48e1-9736-6663d0af3c81-catalog-content\") pod \"463b20fe-b09f-48e1-9736-6663d0af3c81\" (UID: \"463b20fe-b09f-48e1-9736-6663d0af3c81\") " Dec 10 11:09:28 crc kubenswrapper[4682]: I1210 11:09:28.839586 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dhs9\" (UniqueName: \"kubernetes.io/projected/463b20fe-b09f-48e1-9736-6663d0af3c81-kube-api-access-9dhs9\") pod \"463b20fe-b09f-48e1-9736-6663d0af3c81\" (UID: \"463b20fe-b09f-48e1-9736-6663d0af3c81\") " Dec 10 11:09:28 crc kubenswrapper[4682]: I1210 11:09:28.840988 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/463b20fe-b09f-48e1-9736-6663d0af3c81-utilities" (OuterVolumeSpecName: "utilities") pod "463b20fe-b09f-48e1-9736-6663d0af3c81" (UID: "463b20fe-b09f-48e1-9736-6663d0af3c81"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:09:28 crc kubenswrapper[4682]: I1210 11:09:28.853791 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/463b20fe-b09f-48e1-9736-6663d0af3c81-kube-api-access-9dhs9" (OuterVolumeSpecName: "kube-api-access-9dhs9") pod "463b20fe-b09f-48e1-9736-6663d0af3c81" (UID: "463b20fe-b09f-48e1-9736-6663d0af3c81"). InnerVolumeSpecName "kube-api-access-9dhs9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:09:28 crc kubenswrapper[4682]: I1210 11:09:28.941108 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dhs9\" (UniqueName: \"kubernetes.io/projected/463b20fe-b09f-48e1-9736-6663d0af3c81-kube-api-access-9dhs9\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:28 crc kubenswrapper[4682]: I1210 11:09:28.941141 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/463b20fe-b09f-48e1-9736-6663d0af3c81-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:28 crc kubenswrapper[4682]: I1210 11:09:28.951440 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/463b20fe-b09f-48e1-9736-6663d0af3c81-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "463b20fe-b09f-48e1-9736-6663d0af3c81" (UID: "463b20fe-b09f-48e1-9736-6663d0af3c81"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.043330 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/463b20fe-b09f-48e1-9736-6663d0af3c81-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.237227 4682 generic.go:334] "Generic (PLEG): container finished" podID="463b20fe-b09f-48e1-9736-6663d0af3c81" containerID="64f9cae09c4663cfe2a419b31200bc0658dfeaaf208bcc7ba0acbca3d97faac4" exitCode=0 Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.237312 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g948t" Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.237319 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g948t" event={"ID":"463b20fe-b09f-48e1-9736-6663d0af3c81","Type":"ContainerDied","Data":"64f9cae09c4663cfe2a419b31200bc0658dfeaaf208bcc7ba0acbca3d97faac4"} Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.237419 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g948t" event={"ID":"463b20fe-b09f-48e1-9736-6663d0af3c81","Type":"ContainerDied","Data":"8494681d65579c94fcae43a9e44a5ce1c550b5b35df3a575ee716ba7118c8497"} Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.237452 4682 scope.go:117] "RemoveContainer" containerID="64f9cae09c4663cfe2a419b31200bc0658dfeaaf208bcc7ba0acbca3d97faac4" Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.275587 4682 scope.go:117] "RemoveContainer" containerID="ca4060b0ca4fb1d985afdc99ebb37c7e71366b4d42cb8cda58c58a9b83f6c054" Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.276654 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-g948t"] Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.290140 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-g948t"] Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.304032 4682 scope.go:117] "RemoveContainer" containerID="2e22b52066194fb7e88d4a6e976ace5a0d535ed7a48439acb9d4b0e6f2868b52" Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.352895 4682 scope.go:117] "RemoveContainer" containerID="64f9cae09c4663cfe2a419b31200bc0658dfeaaf208bcc7ba0acbca3d97faac4" Dec 10 11:09:29 crc kubenswrapper[4682]: E1210 11:09:29.354341 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64f9cae09c4663cfe2a419b31200bc0658dfeaaf208bcc7ba0acbca3d97faac4\": container with ID starting with 64f9cae09c4663cfe2a419b31200bc0658dfeaaf208bcc7ba0acbca3d97faac4 not found: ID does not exist" containerID="64f9cae09c4663cfe2a419b31200bc0658dfeaaf208bcc7ba0acbca3d97faac4" Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.354403 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64f9cae09c4663cfe2a419b31200bc0658dfeaaf208bcc7ba0acbca3d97faac4"} err="failed to get container status \"64f9cae09c4663cfe2a419b31200bc0658dfeaaf208bcc7ba0acbca3d97faac4\": rpc error: code = NotFound desc = could not find container \"64f9cae09c4663cfe2a419b31200bc0658dfeaaf208bcc7ba0acbca3d97faac4\": container with ID starting with 64f9cae09c4663cfe2a419b31200bc0658dfeaaf208bcc7ba0acbca3d97faac4 not found: ID does not exist" Dec 10 11:09:29 crc 
kubenswrapper[4682]: I1210 11:09:29.354425 4682 scope.go:117] "RemoveContainer" containerID="ca4060b0ca4fb1d985afdc99ebb37c7e71366b4d42cb8cda58c58a9b83f6c054" Dec 10 11:09:29 crc kubenswrapper[4682]: E1210 11:09:29.354699 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca4060b0ca4fb1d985afdc99ebb37c7e71366b4d42cb8cda58c58a9b83f6c054\": container with ID starting with ca4060b0ca4fb1d985afdc99ebb37c7e71366b4d42cb8cda58c58a9b83f6c054 not found: ID does not exist" containerID="ca4060b0ca4fb1d985afdc99ebb37c7e71366b4d42cb8cda58c58a9b83f6c054" Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.354720 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca4060b0ca4fb1d985afdc99ebb37c7e71366b4d42cb8cda58c58a9b83f6c054"} err="failed to get container status \"ca4060b0ca4fb1d985afdc99ebb37c7e71366b4d42cb8cda58c58a9b83f6c054\": rpc error: code = NotFound desc = could not find container \"ca4060b0ca4fb1d985afdc99ebb37c7e71366b4d42cb8cda58c58a9b83f6c054\": container with ID starting with ca4060b0ca4fb1d985afdc99ebb37c7e71366b4d42cb8cda58c58a9b83f6c054 not found: ID does not exist" Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.354732 4682 scope.go:117] "RemoveContainer" containerID="2e22b52066194fb7e88d4a6e976ace5a0d535ed7a48439acb9d4b0e6f2868b52" Dec 10 11:09:29 crc kubenswrapper[4682]: E1210 11:09:29.355022 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e22b52066194fb7e88d4a6e976ace5a0d535ed7a48439acb9d4b0e6f2868b52\": container with ID starting with 2e22b52066194fb7e88d4a6e976ace5a0d535ed7a48439acb9d4b0e6f2868b52 not found: ID does not exist" containerID="2e22b52066194fb7e88d4a6e976ace5a0d535ed7a48439acb9d4b0e6f2868b52" Dec 10 11:09:29 crc kubenswrapper[4682]: I1210 11:09:29.355044 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e22b52066194fb7e88d4a6e976ace5a0d535ed7a48439acb9d4b0e6f2868b52"} err="failed to get container status \"2e22b52066194fb7e88d4a6e976ace5a0d535ed7a48439acb9d4b0e6f2868b52\": rpc error: code = NotFound desc = could not find container \"2e22b52066194fb7e88d4a6e976ace5a0d535ed7a48439acb9d4b0e6f2868b52\": container with ID starting with 2e22b52066194fb7e88d4a6e976ace5a0d535ed7a48439acb9d4b0e6f2868b52 not found: ID does not exist" Dec 10 11:09:30 crc kubenswrapper[4682]: I1210 11:09:30.391203 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="463b20fe-b09f-48e1-9736-6663d0af3c81" path="/var/lib/kubelet/pods/463b20fe-b09f-48e1-9736-6663d0af3c81/volumes" Dec 10 11:09:35 crc kubenswrapper[4682]: I1210 11:09:35.736792 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.194734 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-2cfhl"] Dec 10 11:09:36 crc kubenswrapper[4682]: E1210 11:09:36.195500 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="463b20fe-b09f-48e1-9736-6663d0af3c81" containerName="registry-server" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.195519 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="463b20fe-b09f-48e1-9736-6663d0af3c81" containerName="registry-server" Dec 10 11:09:36 crc kubenswrapper[4682]: E1210 11:09:36.195537 4682 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="463b20fe-b09f-48e1-9736-6663d0af3c81" containerName="extract-content" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.195546 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="463b20fe-b09f-48e1-9736-6663d0af3c81" containerName="extract-content" Dec 10 11:09:36 crc kubenswrapper[4682]: E1210 11:09:36.195573 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="463b20fe-b09f-48e1-9736-6663d0af3c81" containerName="extract-utilities" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.195583 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="463b20fe-b09f-48e1-9736-6663d0af3c81" containerName="extract-utilities" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.195849 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="463b20fe-b09f-48e1-9736-6663d0af3c81" containerName="registry-server" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.196839 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.209749 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-2cfhl"] Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.210658 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.210917 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.278420 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-scripts\") pod \"nova-cell0-cell-mapping-2cfhl\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.278498 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-config-data\") pod \"nova-cell0-cell-mapping-2cfhl\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.278562 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2cfhl\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.278600 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94xdk\" (UniqueName: \"kubernetes.io/projected/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-kube-api-access-94xdk\") pod \"nova-cell0-cell-mapping-2cfhl\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.382144 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-scripts\") pod \"nova-cell0-cell-mapping-2cfhl\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " 
pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.382683 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-config-data\") pod \"nova-cell0-cell-mapping-2cfhl\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.382848 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2cfhl\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.382984 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94xdk\" (UniqueName: \"kubernetes.io/projected/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-kube-api-access-94xdk\") pod \"nova-cell0-cell-mapping-2cfhl\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.402106 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-scripts\") pod \"nova-cell0-cell-mapping-2cfhl\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.402921 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2cfhl\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.423673 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-config-data\") pod \"nova-cell0-cell-mapping-2cfhl\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.426939 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94xdk\" (UniqueName: \"kubernetes.io/projected/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-kube-api-access-94xdk\") pod \"nova-cell0-cell-mapping-2cfhl\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.443074 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.487613 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.489009 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.502148 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.525844 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.526574 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.612559 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.614840 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.618154 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.665890 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.710906 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56e630ea-02d5-4057-b395-53a41202c858-config-data\") pod \"nova-api-0\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " pod="openstack/nova-api-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.710955 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds5qx\" (UniqueName: \"kubernetes.io/projected/56e630ea-02d5-4057-b395-53a41202c858-kube-api-access-ds5qx\") pod \"nova-api-0\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " pod="openstack/nova-api-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.711016 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56e630ea-02d5-4057-b395-53a41202c858-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " pod="openstack/nova-api-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.711069 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6442c083-12c8-47f2-8a74-09443168bad0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"6442c083-12c8-47f2-8a74-09443168bad0\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.711095 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6442c083-12c8-47f2-8a74-09443168bad0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"6442c083-12c8-47f2-8a74-09443168bad0\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.711125 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56e630ea-02d5-4057-b395-53a41202c858-logs\") pod \"nova-api-0\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " pod="openstack/nova-api-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.711208 4682 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2b9pq\" (UniqueName: \"kubernetes.io/projected/6442c083-12c8-47f2-8a74-09443168bad0-kube-api-access-2b9pq\") pod \"nova-cell1-novncproxy-0\" (UID: \"6442c083-12c8-47f2-8a74-09443168bad0\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.758692 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.760995 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.780124 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.807540 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.812518 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2b9pq\" (UniqueName: \"kubernetes.io/projected/6442c083-12c8-47f2-8a74-09443168bad0-kube-api-access-2b9pq\") pod \"nova-cell1-novncproxy-0\" (UID: \"6442c083-12c8-47f2-8a74-09443168bad0\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.812619 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56e630ea-02d5-4057-b395-53a41202c858-config-data\") pod \"nova-api-0\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " pod="openstack/nova-api-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.814392 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds5qx\" (UniqueName: \"kubernetes.io/projected/56e630ea-02d5-4057-b395-53a41202c858-kube-api-access-ds5qx\") pod \"nova-api-0\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " pod="openstack/nova-api-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.814748 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56e630ea-02d5-4057-b395-53a41202c858-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " pod="openstack/nova-api-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.814832 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6442c083-12c8-47f2-8a74-09443168bad0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"6442c083-12c8-47f2-8a74-09443168bad0\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.814863 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6442c083-12c8-47f2-8a74-09443168bad0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"6442c083-12c8-47f2-8a74-09443168bad0\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.814897 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56e630ea-02d5-4057-b395-53a41202c858-logs\") pod \"nova-api-0\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " pod="openstack/nova-api-0" Dec 10 11:09:36 crc 
kubenswrapper[4682]: I1210 11:09:36.815281 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56e630ea-02d5-4057-b395-53a41202c858-logs\") pod \"nova-api-0\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " pod="openstack/nova-api-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.823582 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56e630ea-02d5-4057-b395-53a41202c858-config-data\") pod \"nova-api-0\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " pod="openstack/nova-api-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.824191 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6442c083-12c8-47f2-8a74-09443168bad0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"6442c083-12c8-47f2-8a74-09443168bad0\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.825078 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6442c083-12c8-47f2-8a74-09443168bad0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"6442c083-12c8-47f2-8a74-09443168bad0\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.826512 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56e630ea-02d5-4057-b395-53a41202c858-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " pod="openstack/nova-api-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.838775 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.839727 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2b9pq\" (UniqueName: \"kubernetes.io/projected/6442c083-12c8-47f2-8a74-09443168bad0-kube-api-access-2b9pq\") pod \"nova-cell1-novncproxy-0\" (UID: \"6442c083-12c8-47f2-8a74-09443168bad0\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.840162 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.842713 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.854103 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.857174 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds5qx\" (UniqueName: \"kubernetes.io/projected/56e630ea-02d5-4057-b395-53a41202c858-kube-api-access-ds5qx\") pod \"nova-api-0\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " pod="openstack/nova-api-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.882794 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c9cb78d75-d48lq"] Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.884751 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.918838 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f4ee09a-f13e-474b-a40d-662a22124fcf-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " pod="openstack/nova-metadata-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.919360 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1f4ee09a-f13e-474b-a40d-662a22124fcf-logs\") pod \"nova-metadata-0\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " pod="openstack/nova-metadata-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.919421 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f4ee09a-f13e-474b-a40d-662a22124fcf-config-data\") pod \"nova-metadata-0\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " pod="openstack/nova-metadata-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.920530 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krb8v\" (UniqueName: \"kubernetes.io/projected/1f4ee09a-f13e-474b-a40d-662a22124fcf-kube-api-access-krb8v\") pod \"nova-metadata-0\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " pod="openstack/nova-metadata-0" Dec 10 11:09:36 crc kubenswrapper[4682]: I1210 11:09:36.941906 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c9cb78d75-d48lq"] Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.017344 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.024619 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f574c330-8788-4fa7-9398-c0363d3ebcaa-config-data\") pod \"nova-scheduler-0\" (UID: \"f574c330-8788-4fa7-9398-c0363d3ebcaa\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.024683 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-dns-svc\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.024742 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-config\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.024778 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96hwt\" (UniqueName: \"kubernetes.io/projected/f574c330-8788-4fa7-9398-c0363d3ebcaa-kube-api-access-96hwt\") pod \"nova-scheduler-0\" (UID: \"f574c330-8788-4fa7-9398-c0363d3ebcaa\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.024822 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1f4ee09a-f13e-474b-a40d-662a22124fcf-logs\") pod \"nova-metadata-0\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " pod="openstack/nova-metadata-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.024844 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f4ee09a-f13e-474b-a40d-662a22124fcf-config-data\") pod \"nova-metadata-0\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " pod="openstack/nova-metadata-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.024905 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krb8v\" (UniqueName: \"kubernetes.io/projected/1f4ee09a-f13e-474b-a40d-662a22124fcf-kube-api-access-krb8v\") pod \"nova-metadata-0\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " pod="openstack/nova-metadata-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.025021 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-dns-swift-storage-0\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.025061 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-ovsdbserver-sb\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: 
I1210 11:09:37.025091 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f4ee09a-f13e-474b-a40d-662a22124fcf-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " pod="openstack/nova-metadata-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.025137 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f574c330-8788-4fa7-9398-c0363d3ebcaa-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f574c330-8788-4fa7-9398-c0363d3ebcaa\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.025182 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-ovsdbserver-nb\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.025206 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdmln\" (UniqueName: \"kubernetes.io/projected/c8ba79fa-4920-44e2-950b-c7b6499595c0-kube-api-access-tdmln\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.025558 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1f4ee09a-f13e-474b-a40d-662a22124fcf-logs\") pod \"nova-metadata-0\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " pod="openstack/nova-metadata-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.027983 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f4ee09a-f13e-474b-a40d-662a22124fcf-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " pod="openstack/nova-metadata-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.034310 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f4ee09a-f13e-474b-a40d-662a22124fcf-config-data\") pod \"nova-metadata-0\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " pod="openstack/nova-metadata-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.059274 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krb8v\" (UniqueName: \"kubernetes.io/projected/1f4ee09a-f13e-474b-a40d-662a22124fcf-kube-api-access-krb8v\") pod \"nova-metadata-0\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " pod="openstack/nova-metadata-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.128130 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.129720 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-ovsdbserver-sb\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.129814 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f574c330-8788-4fa7-9398-c0363d3ebcaa-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f574c330-8788-4fa7-9398-c0363d3ebcaa\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.129864 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-ovsdbserver-nb\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.129892 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdmln\" (UniqueName: \"kubernetes.io/projected/c8ba79fa-4920-44e2-950b-c7b6499595c0-kube-api-access-tdmln\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.130017 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f574c330-8788-4fa7-9398-c0363d3ebcaa-config-data\") pod \"nova-scheduler-0\" (UID: \"f574c330-8788-4fa7-9398-c0363d3ebcaa\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.130056 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-dns-svc\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.130087 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-config\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.130124 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96hwt\" (UniqueName: \"kubernetes.io/projected/f574c330-8788-4fa7-9398-c0363d3ebcaa-kube-api-access-96hwt\") pod \"nova-scheduler-0\" (UID: \"f574c330-8788-4fa7-9398-c0363d3ebcaa\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.130252 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-dns-swift-storage-0\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 
11:09:37.132577 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-ovsdbserver-nb\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.132726 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-ovsdbserver-sb\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.133610 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.134321 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-config\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.134388 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-dns-svc\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.134611 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-dns-swift-storage-0\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.134938 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f574c330-8788-4fa7-9398-c0363d3ebcaa-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f574c330-8788-4fa7-9398-c0363d3ebcaa\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.153943 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f574c330-8788-4fa7-9398-c0363d3ebcaa-config-data\") pod \"nova-scheduler-0\" (UID: \"f574c330-8788-4fa7-9398-c0363d3ebcaa\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.158809 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96hwt\" (UniqueName: \"kubernetes.io/projected/f574c330-8788-4fa7-9398-c0363d3ebcaa-kube-api-access-96hwt\") pod \"nova-scheduler-0\" (UID: \"f574c330-8788-4fa7-9398-c0363d3ebcaa\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.172499 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdmln\" (UniqueName: \"kubernetes.io/projected/c8ba79fa-4920-44e2-950b-c7b6499595c0-kube-api-access-tdmln\") pod \"dnsmasq-dns-7c9cb78d75-d48lq\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.184185 4682 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.222126 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.307090 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-2cfhl"] Dec 10 11:09:37 crc kubenswrapper[4682]: W1210 11:09:37.321832 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8b73064f_224e_4f76_9c6b_dba2d1f1dbd7.slice/crio-57592af79d472fb0a419b4422eb6ae15212ff71c7509fea52b1653b0ee5cd567 WatchSource:0}: Error finding container 57592af79d472fb0a419b4422eb6ae15212ff71c7509fea52b1653b0ee5cd567: Status 404 returned error can't find the container with id 57592af79d472fb0a419b4422eb6ae15212ff71c7509fea52b1653b0ee5cd567 Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.354736 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2cfhl" event={"ID":"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7","Type":"ContainerStarted","Data":"57592af79d472fb0a419b4422eb6ae15212ff71c7509fea52b1653b0ee5cd567"} Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.555937 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-277zt"] Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.557723 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.562280 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.562571 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.588270 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-277zt"] Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.597704 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.746436 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-config-data\") pod \"nova-cell1-conductor-db-sync-277zt\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.747021 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-277zt\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.747081 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8kjq\" (UniqueName: \"kubernetes.io/projected/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-kube-api-access-n8kjq\") pod \"nova-cell1-conductor-db-sync-277zt\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " 
pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.747142 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-scripts\") pod \"nova-cell1-conductor-db-sync-277zt\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.804537 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.844316 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.848558 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-config-data\") pod \"nova-cell1-conductor-db-sync-277zt\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.848732 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-277zt\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.848765 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8kjq\" (UniqueName: \"kubernetes.io/projected/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-kube-api-access-n8kjq\") pod \"nova-cell1-conductor-db-sync-277zt\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.848802 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-scripts\") pod \"nova-cell1-conductor-db-sync-277zt\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.854710 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-scripts\") pod \"nova-cell1-conductor-db-sync-277zt\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.854783 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-config-data\") pod \"nova-cell1-conductor-db-sync-277zt\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.856123 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-277zt\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:37 crc 
kubenswrapper[4682]: I1210 11:09:37.869235 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8kjq\" (UniqueName: \"kubernetes.io/projected/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-kube-api-access-n8kjq\") pod \"nova-cell1-conductor-db-sync-277zt\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:37 crc kubenswrapper[4682]: I1210 11:09:37.939996 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:38 crc kubenswrapper[4682]: I1210 11:09:38.040982 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c9cb78d75-d48lq"] Dec 10 11:09:38 crc kubenswrapper[4682]: I1210 11:09:38.069019 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:09:38 crc kubenswrapper[4682]: I1210 11:09:38.376987 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"56e630ea-02d5-4057-b395-53a41202c858","Type":"ContainerStarted","Data":"a80e1be0f20cd34809de595f1e371024c1a348fc58ce120ad6cf48172bce8b50"} Dec 10 11:09:38 crc kubenswrapper[4682]: I1210 11:09:38.379008 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1f4ee09a-f13e-474b-a40d-662a22124fcf","Type":"ContainerStarted","Data":"b86a0385c2867c2986a5636b7ecab40d41d1d40ace8b7249b26466c86f173149"} Dec 10 11:09:38 crc kubenswrapper[4682]: I1210 11:09:38.422354 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-2cfhl" podStartSLOduration=2.422336721 podStartE2EDuration="2.422336721s" podCreationTimestamp="2025-12-10 11:09:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:09:38.420761824 +0000 UTC m=+1458.740972574" watchObservedRunningTime="2025-12-10 11:09:38.422336721 +0000 UTC m=+1458.742547471" Dec 10 11:09:38 crc kubenswrapper[4682]: I1210 11:09:38.425687 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f574c330-8788-4fa7-9398-c0363d3ebcaa","Type":"ContainerStarted","Data":"0a66b76e2a8ebbcf68d15ba961804558854eed55fcb67349c4ad3ba5c61fb240"} Dec 10 11:09:38 crc kubenswrapper[4682]: I1210 11:09:38.425722 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" event={"ID":"c8ba79fa-4920-44e2-950b-c7b6499595c0","Type":"ContainerStarted","Data":"ebea2f944dda7d11f392d40ea9f11998ac5d9fddc03ed90ab6f222af4c3c9b85"} Dec 10 11:09:38 crc kubenswrapper[4682]: I1210 11:09:38.425733 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2cfhl" event={"ID":"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7","Type":"ContainerStarted","Data":"cdaaa0ff2203e7460452dbc1f9d6f7b93260a55ab8cccfbddc723ead84855ad5"} Dec 10 11:09:38 crc kubenswrapper[4682]: I1210 11:09:38.425750 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"6442c083-12c8-47f2-8a74-09443168bad0","Type":"ContainerStarted","Data":"a1f4a9acdba28eab7fc83e8c15d1df8cf97fde1da9578f359e5b876bfe3ce2a5"} Dec 10 11:09:38 crc kubenswrapper[4682]: I1210 11:09:38.494987 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-277zt"] Dec 10 11:09:38 crc kubenswrapper[4682]: W1210 11:09:38.536581 4682 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3cd9ca1_7529_4458_a470_d3dfeed6ad9e.slice/crio-43e789a56cb84fa7619f06fbaf05b931771d62ec758eab4932ce1f7b0448d1f2 WatchSource:0}: Error finding container 43e789a56cb84fa7619f06fbaf05b931771d62ec758eab4932ce1f7b0448d1f2: Status 404 returned error can't find the container with id 43e789a56cb84fa7619f06fbaf05b931771d62ec758eab4932ce1f7b0448d1f2 Dec 10 11:09:39 crc kubenswrapper[4682]: I1210 11:09:39.443427 4682 generic.go:334] "Generic (PLEG): container finished" podID="c8ba79fa-4920-44e2-950b-c7b6499595c0" containerID="dc5de907d4cd927bc7c05a963a67f6bce685b43c5682e0b335bf6ca7005a72e2" exitCode=0 Dec 10 11:09:39 crc kubenswrapper[4682]: I1210 11:09:39.443798 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" event={"ID":"c8ba79fa-4920-44e2-950b-c7b6499595c0","Type":"ContainerDied","Data":"dc5de907d4cd927bc7c05a963a67f6bce685b43c5682e0b335bf6ca7005a72e2"} Dec 10 11:09:39 crc kubenswrapper[4682]: I1210 11:09:39.448852 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-277zt" event={"ID":"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e","Type":"ContainerStarted","Data":"daaae517be80afb8858259496e97ff8508ff92f67c729c44b9fad5def22ed979"} Dec 10 11:09:39 crc kubenswrapper[4682]: I1210 11:09:39.448885 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-277zt" event={"ID":"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e","Type":"ContainerStarted","Data":"43e789a56cb84fa7619f06fbaf05b931771d62ec758eab4932ce1f7b0448d1f2"} Dec 10 11:09:39 crc kubenswrapper[4682]: I1210 11:09:39.498429 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-277zt" podStartSLOduration=2.498340711 podStartE2EDuration="2.498340711s" podCreationTimestamp="2025-12-10 11:09:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:09:39.478866829 +0000 UTC m=+1459.799077579" watchObservedRunningTime="2025-12-10 11:09:39.498340711 +0000 UTC m=+1459.818551461" Dec 10 11:09:40 crc kubenswrapper[4682]: I1210 11:09:40.240529 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:09:40 crc kubenswrapper[4682]: I1210 11:09:40.253081 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.484429 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"56e630ea-02d5-4057-b395-53a41202c858","Type":"ContainerStarted","Data":"8b344cdf6a3233319a4c244e955568cf9f7199d99eb2d625b97899058aacd773"} Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.485047 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"56e630ea-02d5-4057-b395-53a41202c858","Type":"ContainerStarted","Data":"473af833621c763dfd2f6f3c5da0d6a78d43ec36be003f233981287eaf8c9509"} Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.488913 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1f4ee09a-f13e-474b-a40d-662a22124fcf","Type":"ContainerStarted","Data":"8f7eaf1ead30c3d43211266a037fa5a8d0c2b80d81c10e3a347a6996ab23e628"} Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.488976 4682 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1f4ee09a-f13e-474b-a40d-662a22124fcf","Type":"ContainerStarted","Data":"d6652bca13780817c220c11b48b1e1f8c61df41e25f44de75cdf49409f3eeec7"} Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.488967 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="1f4ee09a-f13e-474b-a40d-662a22124fcf" containerName="nova-metadata-log" containerID="cri-o://d6652bca13780817c220c11b48b1e1f8c61df41e25f44de75cdf49409f3eeec7" gracePeriod=30 Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.488986 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="1f4ee09a-f13e-474b-a40d-662a22124fcf" containerName="nova-metadata-metadata" containerID="cri-o://8f7eaf1ead30c3d43211266a037fa5a8d0c2b80d81c10e3a347a6996ab23e628" gracePeriod=30 Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.492068 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f574c330-8788-4fa7-9398-c0363d3ebcaa","Type":"ContainerStarted","Data":"9e472e45aa60c240978d1b9dda0c682ec865462a197ff12a75cf6648a43a1534"} Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.498773 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" event={"ID":"c8ba79fa-4920-44e2-950b-c7b6499595c0","Type":"ContainerStarted","Data":"3ec62a3bda5f29dfe0062eeb3cbf5c24afc993e135ab86d1eaf7cdc60db7a136"} Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.499043 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.500864 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"6442c083-12c8-47f2-8a74-09443168bad0","Type":"ContainerStarted","Data":"94f2cca131c5f863aacee42cfe8b20216eb54a8cd61fd80a74da11adf19ca8cd"} Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.501069 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="6442c083-12c8-47f2-8a74-09443168bad0" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://94f2cca131c5f863aacee42cfe8b20216eb54a8cd61fd80a74da11adf19ca8cd" gracePeriod=30 Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.527902 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.833730552 podStartE2EDuration="6.527881272s" podCreationTimestamp="2025-12-10 11:09:36 +0000 UTC" firstStartedPulling="2025-12-10 11:09:37.587823818 +0000 UTC m=+1457.908034568" lastFinishedPulling="2025-12-10 11:09:41.281974538 +0000 UTC m=+1461.602185288" observedRunningTime="2025-12-10 11:09:42.510715008 +0000 UTC m=+1462.830925808" watchObservedRunningTime="2025-12-10 11:09:42.527881272 +0000 UTC m=+1462.848092012" Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.539710 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" podStartSLOduration=6.539691335 podStartE2EDuration="6.539691335s" podCreationTimestamp="2025-12-10 11:09:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:09:42.539440907 +0000 UTC m=+1462.859651667" watchObservedRunningTime="2025-12-10 11:09:42.539691335 
+0000 UTC m=+1462.859902085" Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.580314 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.156491691 podStartE2EDuration="6.58028916s" podCreationTimestamp="2025-12-10 11:09:36 +0000 UTC" firstStartedPulling="2025-12-10 11:09:37.845762037 +0000 UTC m=+1458.165972797" lastFinishedPulling="2025-12-10 11:09:41.269559516 +0000 UTC m=+1461.589770266" observedRunningTime="2025-12-10 11:09:42.563123326 +0000 UTC m=+1462.883334086" watchObservedRunningTime="2025-12-10 11:09:42.58028916 +0000 UTC m=+1462.900499910" Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.593340 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.145971716 podStartE2EDuration="6.59332303s" podCreationTimestamp="2025-12-10 11:09:36 +0000 UTC" firstStartedPulling="2025-12-10 11:09:37.819688886 +0000 UTC m=+1458.139899636" lastFinishedPulling="2025-12-10 11:09:41.2670402 +0000 UTC m=+1461.587250950" observedRunningTime="2025-12-10 11:09:42.593100163 +0000 UTC m=+1462.913310913" watchObservedRunningTime="2025-12-10 11:09:42.59332303 +0000 UTC m=+1462.913533780" Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.624146 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.394605007 podStartE2EDuration="6.624123821s" podCreationTimestamp="2025-12-10 11:09:36 +0000 UTC" firstStartedPulling="2025-12-10 11:09:38.040182046 +0000 UTC m=+1458.360392796" lastFinishedPulling="2025-12-10 11:09:41.26970086 +0000 UTC m=+1461.589911610" observedRunningTime="2025-12-10 11:09:42.608916957 +0000 UTC m=+1462.929127737" watchObservedRunningTime="2025-12-10 11:09:42.624123821 +0000 UTC m=+1462.944334581" Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.892297 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:09:42 crc kubenswrapper[4682]: I1210 11:09:42.892587 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="af0ab072-8822-403b-ac67-8689937752bd" containerName="kube-state-metrics" containerID="cri-o://52324fb463d9374fe418fc5517f460fb14e8b91050922a1ac5fb777a5aed90c6" gracePeriod=30 Dec 10 11:09:43 crc kubenswrapper[4682]: I1210 11:09:43.512670 4682 generic.go:334] "Generic (PLEG): container finished" podID="af0ab072-8822-403b-ac67-8689937752bd" containerID="52324fb463d9374fe418fc5517f460fb14e8b91050922a1ac5fb777a5aed90c6" exitCode=2 Dec 10 11:09:43 crc kubenswrapper[4682]: I1210 11:09:43.512767 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"af0ab072-8822-403b-ac67-8689937752bd","Type":"ContainerDied","Data":"52324fb463d9374fe418fc5517f460fb14e8b91050922a1ac5fb777a5aed90c6"} Dec 10 11:09:43 crc kubenswrapper[4682]: I1210 11:09:43.513887 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"af0ab072-8822-403b-ac67-8689937752bd","Type":"ContainerDied","Data":"4f1c2d2fde62292e929e9364e36b6a02e75e86ecd644fa71418806e410a6fbb4"} Dec 10 11:09:43 crc kubenswrapper[4682]: I1210 11:09:43.513981 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4f1c2d2fde62292e929e9364e36b6a02e75e86ecd644fa71418806e410a6fbb4" Dec 10 11:09:43 crc kubenswrapper[4682]: I1210 11:09:43.516199 4682 generic.go:334] 
"Generic (PLEG): container finished" podID="1f4ee09a-f13e-474b-a40d-662a22124fcf" containerID="d6652bca13780817c220c11b48b1e1f8c61df41e25f44de75cdf49409f3eeec7" exitCode=143 Dec 10 11:09:43 crc kubenswrapper[4682]: I1210 11:09:43.517168 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1f4ee09a-f13e-474b-a40d-662a22124fcf","Type":"ContainerDied","Data":"d6652bca13780817c220c11b48b1e1f8c61df41e25f44de75cdf49409f3eeec7"} Dec 10 11:09:43 crc kubenswrapper[4682]: I1210 11:09:43.625192 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 11:09:43 crc kubenswrapper[4682]: I1210 11:09:43.775896 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vsnpq\" (UniqueName: \"kubernetes.io/projected/af0ab072-8822-403b-ac67-8689937752bd-kube-api-access-vsnpq\") pod \"af0ab072-8822-403b-ac67-8689937752bd\" (UID: \"af0ab072-8822-403b-ac67-8689937752bd\") " Dec 10 11:09:43 crc kubenswrapper[4682]: I1210 11:09:43.801058 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af0ab072-8822-403b-ac67-8689937752bd-kube-api-access-vsnpq" (OuterVolumeSpecName: "kube-api-access-vsnpq") pod "af0ab072-8822-403b-ac67-8689937752bd" (UID: "af0ab072-8822-403b-ac67-8689937752bd"). InnerVolumeSpecName "kube-api-access-vsnpq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:09:43 crc kubenswrapper[4682]: I1210 11:09:43.878842 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vsnpq\" (UniqueName: \"kubernetes.io/projected/af0ab072-8822-403b-ac67-8689937752bd-kube-api-access-vsnpq\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.528701 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.565065 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.576789 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.585264 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:09:44 crc kubenswrapper[4682]: E1210 11:09:44.585819 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af0ab072-8822-403b-ac67-8689937752bd" containerName="kube-state-metrics" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.585839 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="af0ab072-8822-403b-ac67-8689937752bd" containerName="kube-state-metrics" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.586021 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="af0ab072-8822-403b-ac67-8689937752bd" containerName="kube-state-metrics" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.586835 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.588916 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.590142 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.594512 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.696979 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a\") " pod="openstack/kube-state-metrics-0" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.697371 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a\") " pod="openstack/kube-state-metrics-0" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.697776 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtj24\" (UniqueName: \"kubernetes.io/projected/d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a-kube-api-access-wtj24\") pod \"kube-state-metrics-0\" (UID: \"d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a\") " pod="openstack/kube-state-metrics-0" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.697827 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a\") " pod="openstack/kube-state-metrics-0" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.800061 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a\") " pod="openstack/kube-state-metrics-0" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.800166 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a\") " pod="openstack/kube-state-metrics-0" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.800354 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtj24\" (UniqueName: \"kubernetes.io/projected/d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a-kube-api-access-wtj24\") pod \"kube-state-metrics-0\" (UID: \"d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a\") " pod="openstack/kube-state-metrics-0" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.800380 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a\") " pod="openstack/kube-state-metrics-0" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.806269 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a\") " pod="openstack/kube-state-metrics-0" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.812602 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a\") " pod="openstack/kube-state-metrics-0" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.816267 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a\") " pod="openstack/kube-state-metrics-0" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.818742 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtj24\" (UniqueName: \"kubernetes.io/projected/d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a-kube-api-access-wtj24\") pod \"kube-state-metrics-0\" (UID: \"d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a\") " pod="openstack/kube-state-metrics-0" Dec 10 11:09:44 crc kubenswrapper[4682]: I1210 11:09:44.914741 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 11:09:45 crc kubenswrapper[4682]: I1210 11:09:45.340158 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:09:45 crc kubenswrapper[4682]: I1210 11:09:45.340761 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="ceilometer-central-agent" containerID="cri-o://1c2fbb0868dda0fd77e32030ce0cb234929b1c619ebada2abcfd8e688c116d92" gracePeriod=30 Dec 10 11:09:45 crc kubenswrapper[4682]: I1210 11:09:45.341302 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="proxy-httpd" containerID="cri-o://b35e83a658e2fd48c52ad9f456b48123f74e19434e28eb7e2f573e359da9dd28" gracePeriod=30 Dec 10 11:09:45 crc kubenswrapper[4682]: I1210 11:09:45.341391 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="ceilometer-notification-agent" containerID="cri-o://39a8adcd86c4e1fafeb0ddb1bb079e59d4aa7b03fcbb8713b52ed781fbc972b8" gracePeriod=30 Dec 10 11:09:45 crc kubenswrapper[4682]: I1210 11:09:45.341507 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="sg-core" containerID="cri-o://6e0db9038a6a4e2eb441dd41c29f91f24298f044771f61aa94da79aab5d082a0" gracePeriod=30 Dec 10 11:09:45 crc kubenswrapper[4682]: I1210 11:09:45.388884 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 11:09:45 crc kubenswrapper[4682]: W1210 11:09:45.399225 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd282a8a0_6a52_4e7c_8fd1_0518ee8c4a7a.slice/crio-409bb9ee7b38a38ecfbab1470a8edb475aa65591c7f4e636d4c59a6104cb0bb6 WatchSource:0}: Error finding container 409bb9ee7b38a38ecfbab1470a8edb475aa65591c7f4e636d4c59a6104cb0bb6: Status 404 returned error can't find the container with id 409bb9ee7b38a38ecfbab1470a8edb475aa65591c7f4e636d4c59a6104cb0bb6 Dec 10 11:09:45 crc kubenswrapper[4682]: I1210 11:09:45.562106 4682 generic.go:334] "Generic (PLEG): container finished" podID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerID="6e0db9038a6a4e2eb441dd41c29f91f24298f044771f61aa94da79aab5d082a0" exitCode=2 Dec 10 11:09:45 crc kubenswrapper[4682]: I1210 11:09:45.562165 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b1976646-8c73-4c29-a9e4-71527b1f3f61","Type":"ContainerDied","Data":"6e0db9038a6a4e2eb441dd41c29f91f24298f044771f61aa94da79aab5d082a0"} Dec 10 11:09:45 crc kubenswrapper[4682]: I1210 11:09:45.571293 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a","Type":"ContainerStarted","Data":"409bb9ee7b38a38ecfbab1470a8edb475aa65591c7f4e636d4c59a6104cb0bb6"} Dec 10 11:09:46 crc kubenswrapper[4682]: E1210 11:09:46.279190 4682 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8b73064f_224e_4f76_9c6b_dba2d1f1dbd7.slice/crio-cdaaa0ff2203e7460452dbc1f9d6f7b93260a55ab8cccfbddc723ead84855ad5.scope\": RecentStats: unable to 
find data in memory cache]" Dec 10 11:09:46 crc kubenswrapper[4682]: I1210 11:09:46.411493 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af0ab072-8822-403b-ac67-8689937752bd" path="/var/lib/kubelet/pods/af0ab072-8822-403b-ac67-8689937752bd/volumes" Dec 10 11:09:46 crc kubenswrapper[4682]: I1210 11:09:46.583263 4682 generic.go:334] "Generic (PLEG): container finished" podID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerID="b35e83a658e2fd48c52ad9f456b48123f74e19434e28eb7e2f573e359da9dd28" exitCode=0 Dec 10 11:09:46 crc kubenswrapper[4682]: I1210 11:09:46.583294 4682 generic.go:334] "Generic (PLEG): container finished" podID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerID="1c2fbb0868dda0fd77e32030ce0cb234929b1c619ebada2abcfd8e688c116d92" exitCode=0 Dec 10 11:09:46 crc kubenswrapper[4682]: I1210 11:09:46.583329 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b1976646-8c73-4c29-a9e4-71527b1f3f61","Type":"ContainerDied","Data":"b35e83a658e2fd48c52ad9f456b48123f74e19434e28eb7e2f573e359da9dd28"} Dec 10 11:09:46 crc kubenswrapper[4682]: I1210 11:09:46.583354 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b1976646-8c73-4c29-a9e4-71527b1f3f61","Type":"ContainerDied","Data":"1c2fbb0868dda0fd77e32030ce0cb234929b1c619ebada2abcfd8e688c116d92"} Dec 10 11:09:46 crc kubenswrapper[4682]: I1210 11:09:46.584428 4682 generic.go:334] "Generic (PLEG): container finished" podID="8b73064f-224e-4f76-9c6b-dba2d1f1dbd7" containerID="cdaaa0ff2203e7460452dbc1f9d6f7b93260a55ab8cccfbddc723ead84855ad5" exitCode=0 Dec 10 11:09:46 crc kubenswrapper[4682]: I1210 11:09:46.584479 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2cfhl" event={"ID":"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7","Type":"ContainerDied","Data":"cdaaa0ff2203e7460452dbc1f9d6f7b93260a55ab8cccfbddc723ead84855ad5"} Dec 10 11:09:46 crc kubenswrapper[4682]: I1210 11:09:46.589924 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a","Type":"ContainerStarted","Data":"2462d3cf60801029a2c1a64d7fdca4ed3caa90e3f6fa6fb01fb375f7b88a7daf"} Dec 10 11:09:46 crc kubenswrapper[4682]: I1210 11:09:46.590624 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 10 11:09:46 crc kubenswrapper[4682]: I1210 11:09:46.622266 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.090057721 podStartE2EDuration="2.622250447s" podCreationTimestamp="2025-12-10 11:09:44 +0000 UTC" firstStartedPulling="2025-12-10 11:09:45.401229218 +0000 UTC m=+1465.721439968" lastFinishedPulling="2025-12-10 11:09:45.933421944 +0000 UTC m=+1466.253632694" observedRunningTime="2025-12-10 11:09:46.619161895 +0000 UTC m=+1466.939372645" watchObservedRunningTime="2025-12-10 11:09:46.622250447 +0000 UTC m=+1466.942461197" Dec 10 11:09:47 crc kubenswrapper[4682]: I1210 11:09:47.018064 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 11:09:47 crc kubenswrapper[4682]: I1210 11:09:47.018118 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 11:09:47 crc kubenswrapper[4682]: I1210 11:09:47.140943 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 10 
11:09:47 crc kubenswrapper[4682]: I1210 11:09:47.141187 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 10 11:09:47 crc kubenswrapper[4682]: I1210 11:09:47.141663 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:09:47 crc kubenswrapper[4682]: I1210 11:09:47.188365 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 10 11:09:47 crc kubenswrapper[4682]: I1210 11:09:47.188442 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 10 11:09:47 crc kubenswrapper[4682]: I1210 11:09:47.224394 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:09:47 crc kubenswrapper[4682]: I1210 11:09:47.227592 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 10 11:09:47 crc kubenswrapper[4682]: I1210 11:09:47.290632 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86d9875b97-kxg7x"] Dec 10 11:09:47 crc kubenswrapper[4682]: I1210 11:09:47.292427 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" podUID="53824719-3472-4d94-be91-5a1f3176e34d" containerName="dnsmasq-dns" containerID="cri-o://e383d8a5d9567fcd4199ed7af6e99535d846658fb64dccc90e724e043afa5373" gracePeriod=10 Dec 10 11:09:47 crc kubenswrapper[4682]: I1210 11:09:47.798347 4682 generic.go:334] "Generic (PLEG): container finished" podID="53824719-3472-4d94-be91-5a1f3176e34d" containerID="e383d8a5d9567fcd4199ed7af6e99535d846658fb64dccc90e724e043afa5373" exitCode=0 Dec 10 11:09:47 crc kubenswrapper[4682]: I1210 11:09:47.799843 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" event={"ID":"53824719-3472-4d94-be91-5a1f3176e34d","Type":"ContainerDied","Data":"e383d8a5d9567fcd4199ed7af6e99535d846658fb64dccc90e724e043afa5373"} Dec 10 11:09:47 crc kubenswrapper[4682]: I1210 11:09:47.861260 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.101671 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="56e630ea-02d5-4057-b395-53a41202c858" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.207:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.101698 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="56e630ea-02d5-4057-b395-53a41202c858" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.207:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.163724 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.183398 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-ovsdbserver-sb\") pod \"53824719-3472-4d94-be91-5a1f3176e34d\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.183436 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-config\") pod \"53824719-3472-4d94-be91-5a1f3176e34d\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.183615 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-ovsdbserver-nb\") pod \"53824719-3472-4d94-be91-5a1f3176e34d\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.183657 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sxbkg\" (UniqueName: \"kubernetes.io/projected/53824719-3472-4d94-be91-5a1f3176e34d-kube-api-access-sxbkg\") pod \"53824719-3472-4d94-be91-5a1f3176e34d\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.183712 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-dns-svc\") pod \"53824719-3472-4d94-be91-5a1f3176e34d\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.183732 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-dns-swift-storage-0\") pod \"53824719-3472-4d94-be91-5a1f3176e34d\" (UID: \"53824719-3472-4d94-be91-5a1f3176e34d\") " Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.205116 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53824719-3472-4d94-be91-5a1f3176e34d-kube-api-access-sxbkg" (OuterVolumeSpecName: "kube-api-access-sxbkg") pod "53824719-3472-4d94-be91-5a1f3176e34d" (UID: "53824719-3472-4d94-be91-5a1f3176e34d"). InnerVolumeSpecName "kube-api-access-sxbkg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.268141 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "53824719-3472-4d94-be91-5a1f3176e34d" (UID: "53824719-3472-4d94-be91-5a1f3176e34d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.282078 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "53824719-3472-4d94-be91-5a1f3176e34d" (UID: "53824719-3472-4d94-be91-5a1f3176e34d"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.283900 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-config" (OuterVolumeSpecName: "config") pod "53824719-3472-4d94-be91-5a1f3176e34d" (UID: "53824719-3472-4d94-be91-5a1f3176e34d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.285718 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.285748 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sxbkg\" (UniqueName: \"kubernetes.io/projected/53824719-3472-4d94-be91-5a1f3176e34d-kube-api-access-sxbkg\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.285762 4682 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.285773 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.302997 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "53824719-3472-4d94-be91-5a1f3176e34d" (UID: "53824719-3472-4d94-be91-5a1f3176e34d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.304685 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "53824719-3472-4d94-be91-5a1f3176e34d" (UID: "53824719-3472-4d94-be91-5a1f3176e34d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.317687 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.386854 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-scripts\") pod \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.386902 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-config-data\") pod \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.386983 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-combined-ca-bundle\") pod \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.387154 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94xdk\" (UniqueName: \"kubernetes.io/projected/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-kube-api-access-94xdk\") pod \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\" (UID: \"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7\") " Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.387647 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.387675 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/53824719-3472-4d94-be91-5a1f3176e34d-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.390498 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-scripts" (OuterVolumeSpecName: "scripts") pod "8b73064f-224e-4f76-9c6b-dba2d1f1dbd7" (UID: "8b73064f-224e-4f76-9c6b-dba2d1f1dbd7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.392619 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-kube-api-access-94xdk" (OuterVolumeSpecName: "kube-api-access-94xdk") pod "8b73064f-224e-4f76-9c6b-dba2d1f1dbd7" (UID: "8b73064f-224e-4f76-9c6b-dba2d1f1dbd7"). InnerVolumeSpecName "kube-api-access-94xdk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.430943 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-config-data" (OuterVolumeSpecName: "config-data") pod "8b73064f-224e-4f76-9c6b-dba2d1f1dbd7" (UID: "8b73064f-224e-4f76-9c6b-dba2d1f1dbd7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.443486 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8b73064f-224e-4f76-9c6b-dba2d1f1dbd7" (UID: "8b73064f-224e-4f76-9c6b-dba2d1f1dbd7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.490363 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.490401 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94xdk\" (UniqueName: \"kubernetes.io/projected/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-kube-api-access-94xdk\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.490419 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.490432 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.811765 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" event={"ID":"53824719-3472-4d94-be91-5a1f3176e34d","Type":"ContainerDied","Data":"a55b683228cbbc86462e0a9911160fd91c6c4b8d0838effa934193820bf9a00f"} Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.811805 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86d9875b97-kxg7x" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.811829 4682 scope.go:117] "RemoveContainer" containerID="e383d8a5d9567fcd4199ed7af6e99535d846658fb64dccc90e724e043afa5373" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.816273 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2cfhl" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.816708 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2cfhl" event={"ID":"8b73064f-224e-4f76-9c6b-dba2d1f1dbd7","Type":"ContainerDied","Data":"57592af79d472fb0a419b4422eb6ae15212ff71c7509fea52b1653b0ee5cd567"} Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.816831 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="57592af79d472fb0a419b4422eb6ae15212ff71c7509fea52b1653b0ee5cd567" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.860560 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86d9875b97-kxg7x"] Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.869759 4682 scope.go:117] "RemoveContainer" containerID="339c9d985c738cfba338b2d80c216ff6ee2eb2eea81db6d073a77a17ec310825" Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.870381 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86d9875b97-kxg7x"] Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.968644 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.979542 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.979984 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="56e630ea-02d5-4057-b395-53a41202c858" containerName="nova-api-api" containerID="cri-o://8b344cdf6a3233319a4c244e955568cf9f7199d99eb2d625b97899058aacd773" gracePeriod=30 Dec 10 11:09:48 crc kubenswrapper[4682]: I1210 11:09:48.980329 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="56e630ea-02d5-4057-b395-53a41202c858" containerName="nova-api-log" containerID="cri-o://473af833621c763dfd2f6f3c5da0d6a78d43ec36be003f233981287eaf8c9509" gracePeriod=30 Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.538738 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.614005 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-sg-core-conf-yaml\") pod \"b1976646-8c73-4c29-a9e4-71527b1f3f61\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.614115 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vk29f\" (UniqueName: \"kubernetes.io/projected/b1976646-8c73-4c29-a9e4-71527b1f3f61-kube-api-access-vk29f\") pod \"b1976646-8c73-4c29-a9e4-71527b1f3f61\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.614150 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-combined-ca-bundle\") pod \"b1976646-8c73-4c29-a9e4-71527b1f3f61\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.614194 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-scripts\") pod \"b1976646-8c73-4c29-a9e4-71527b1f3f61\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.614289 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b1976646-8c73-4c29-a9e4-71527b1f3f61-run-httpd\") pod \"b1976646-8c73-4c29-a9e4-71527b1f3f61\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.614346 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-config-data\") pod \"b1976646-8c73-4c29-a9e4-71527b1f3f61\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.614376 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b1976646-8c73-4c29-a9e4-71527b1f3f61-log-httpd\") pod \"b1976646-8c73-4c29-a9e4-71527b1f3f61\" (UID: \"b1976646-8c73-4c29-a9e4-71527b1f3f61\") " Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.615097 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1976646-8c73-4c29-a9e4-71527b1f3f61-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b1976646-8c73-4c29-a9e4-71527b1f3f61" (UID: "b1976646-8c73-4c29-a9e4-71527b1f3f61"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.615237 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1976646-8c73-4c29-a9e4-71527b1f3f61-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b1976646-8c73-4c29-a9e4-71527b1f3f61" (UID: "b1976646-8c73-4c29-a9e4-71527b1f3f61"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.641107 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-scripts" (OuterVolumeSpecName: "scripts") pod "b1976646-8c73-4c29-a9e4-71527b1f3f61" (UID: "b1976646-8c73-4c29-a9e4-71527b1f3f61"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.642614 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1976646-8c73-4c29-a9e4-71527b1f3f61-kube-api-access-vk29f" (OuterVolumeSpecName: "kube-api-access-vk29f") pod "b1976646-8c73-4c29-a9e4-71527b1f3f61" (UID: "b1976646-8c73-4c29-a9e4-71527b1f3f61"). InnerVolumeSpecName "kube-api-access-vk29f". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.668625 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b1976646-8c73-4c29-a9e4-71527b1f3f61" (UID: "b1976646-8c73-4c29-a9e4-71527b1f3f61"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.761618 4682 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b1976646-8c73-4c29-a9e4-71527b1f3f61-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.761937 4682 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.761953 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vk29f\" (UniqueName: \"kubernetes.io/projected/b1976646-8c73-4c29-a9e4-71527b1f3f61-kube-api-access-vk29f\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.761965 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.761978 4682 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b1976646-8c73-4c29-a9e4-71527b1f3f61-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.808864 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b1976646-8c73-4c29-a9e4-71527b1f3f61" (UID: "b1976646-8c73-4c29-a9e4-71527b1f3f61"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.839025 4682 generic.go:334] "Generic (PLEG): container finished" podID="56e630ea-02d5-4057-b395-53a41202c858" containerID="473af833621c763dfd2f6f3c5da0d6a78d43ec36be003f233981287eaf8c9509" exitCode=143 Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.839091 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"56e630ea-02d5-4057-b395-53a41202c858","Type":"ContainerDied","Data":"473af833621c763dfd2f6f3c5da0d6a78d43ec36be003f233981287eaf8c9509"} Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.840994 4682 generic.go:334] "Generic (PLEG): container finished" podID="d3cd9ca1-7529-4458-a470-d3dfeed6ad9e" containerID="daaae517be80afb8858259496e97ff8508ff92f67c729c44b9fad5def22ed979" exitCode=0 Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.841060 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-277zt" event={"ID":"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e","Type":"ContainerDied","Data":"daaae517be80afb8858259496e97ff8508ff92f67c729c44b9fad5def22ed979"} Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.845857 4682 generic.go:334] "Generic (PLEG): container finished" podID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerID="39a8adcd86c4e1fafeb0ddb1bb079e59d4aa7b03fcbb8713b52ed781fbc972b8" exitCode=0 Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.845937 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.845991 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b1976646-8c73-4c29-a9e4-71527b1f3f61","Type":"ContainerDied","Data":"39a8adcd86c4e1fafeb0ddb1bb079e59d4aa7b03fcbb8713b52ed781fbc972b8"} Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.846020 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b1976646-8c73-4c29-a9e4-71527b1f3f61","Type":"ContainerDied","Data":"9d3039ba65e3037243c91bbfc9e42822987082774ab62028304f586c47325a1a"} Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.846037 4682 scope.go:117] "RemoveContainer" containerID="b35e83a658e2fd48c52ad9f456b48123f74e19434e28eb7e2f573e359da9dd28" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.846033 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="f574c330-8788-4fa7-9398-c0363d3ebcaa" containerName="nova-scheduler-scheduler" containerID="cri-o://9e472e45aa60c240978d1b9dda0c682ec865462a197ff12a75cf6648a43a1534" gracePeriod=30 Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.863987 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.904546 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-config-data" (OuterVolumeSpecName: "config-data") pod "b1976646-8c73-4c29-a9e4-71527b1f3f61" (UID: "b1976646-8c73-4c29-a9e4-71527b1f3f61"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.955882 4682 scope.go:117] "RemoveContainer" containerID="6e0db9038a6a4e2eb441dd41c29f91f24298f044771f61aa94da79aab5d082a0" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.968046 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1976646-8c73-4c29-a9e4-71527b1f3f61-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.976898 4682 scope.go:117] "RemoveContainer" containerID="39a8adcd86c4e1fafeb0ddb1bb079e59d4aa7b03fcbb8713b52ed781fbc972b8" Dec 10 11:09:49 crc kubenswrapper[4682]: I1210 11:09:49.995849 4682 scope.go:117] "RemoveContainer" containerID="1c2fbb0868dda0fd77e32030ce0cb234929b1c619ebada2abcfd8e688c116d92" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.018396 4682 scope.go:117] "RemoveContainer" containerID="b35e83a658e2fd48c52ad9f456b48123f74e19434e28eb7e2f573e359da9dd28" Dec 10 11:09:50 crc kubenswrapper[4682]: E1210 11:09:50.018777 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b35e83a658e2fd48c52ad9f456b48123f74e19434e28eb7e2f573e359da9dd28\": container with ID starting with b35e83a658e2fd48c52ad9f456b48123f74e19434e28eb7e2f573e359da9dd28 not found: ID does not exist" containerID="b35e83a658e2fd48c52ad9f456b48123f74e19434e28eb7e2f573e359da9dd28" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.018809 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b35e83a658e2fd48c52ad9f456b48123f74e19434e28eb7e2f573e359da9dd28"} err="failed to get container status \"b35e83a658e2fd48c52ad9f456b48123f74e19434e28eb7e2f573e359da9dd28\": rpc error: code = NotFound desc = could not find container \"b35e83a658e2fd48c52ad9f456b48123f74e19434e28eb7e2f573e359da9dd28\": container with ID starting with b35e83a658e2fd48c52ad9f456b48123f74e19434e28eb7e2f573e359da9dd28 not found: ID does not exist" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.018830 4682 scope.go:117] "RemoveContainer" containerID="6e0db9038a6a4e2eb441dd41c29f91f24298f044771f61aa94da79aab5d082a0" Dec 10 11:09:50 crc kubenswrapper[4682]: E1210 11:09:50.019177 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e0db9038a6a4e2eb441dd41c29f91f24298f044771f61aa94da79aab5d082a0\": container with ID starting with 6e0db9038a6a4e2eb441dd41c29f91f24298f044771f61aa94da79aab5d082a0 not found: ID does not exist" containerID="6e0db9038a6a4e2eb441dd41c29f91f24298f044771f61aa94da79aab5d082a0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.019222 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e0db9038a6a4e2eb441dd41c29f91f24298f044771f61aa94da79aab5d082a0"} err="failed to get container status \"6e0db9038a6a4e2eb441dd41c29f91f24298f044771f61aa94da79aab5d082a0\": rpc error: code = NotFound desc = could not find container \"6e0db9038a6a4e2eb441dd41c29f91f24298f044771f61aa94da79aab5d082a0\": container with ID starting with 6e0db9038a6a4e2eb441dd41c29f91f24298f044771f61aa94da79aab5d082a0 not found: ID does not exist" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.019258 4682 scope.go:117] "RemoveContainer" containerID="39a8adcd86c4e1fafeb0ddb1bb079e59d4aa7b03fcbb8713b52ed781fbc972b8" Dec 10 11:09:50 crc kubenswrapper[4682]: E1210 
11:09:50.019853 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39a8adcd86c4e1fafeb0ddb1bb079e59d4aa7b03fcbb8713b52ed781fbc972b8\": container with ID starting with 39a8adcd86c4e1fafeb0ddb1bb079e59d4aa7b03fcbb8713b52ed781fbc972b8 not found: ID does not exist" containerID="39a8adcd86c4e1fafeb0ddb1bb079e59d4aa7b03fcbb8713b52ed781fbc972b8" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.019883 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39a8adcd86c4e1fafeb0ddb1bb079e59d4aa7b03fcbb8713b52ed781fbc972b8"} err="failed to get container status \"39a8adcd86c4e1fafeb0ddb1bb079e59d4aa7b03fcbb8713b52ed781fbc972b8\": rpc error: code = NotFound desc = could not find container \"39a8adcd86c4e1fafeb0ddb1bb079e59d4aa7b03fcbb8713b52ed781fbc972b8\": container with ID starting with 39a8adcd86c4e1fafeb0ddb1bb079e59d4aa7b03fcbb8713b52ed781fbc972b8 not found: ID does not exist" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.019903 4682 scope.go:117] "RemoveContainer" containerID="1c2fbb0868dda0fd77e32030ce0cb234929b1c619ebada2abcfd8e688c116d92" Dec 10 11:09:50 crc kubenswrapper[4682]: E1210 11:09:50.020181 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c2fbb0868dda0fd77e32030ce0cb234929b1c619ebada2abcfd8e688c116d92\": container with ID starting with 1c2fbb0868dda0fd77e32030ce0cb234929b1c619ebada2abcfd8e688c116d92 not found: ID does not exist" containerID="1c2fbb0868dda0fd77e32030ce0cb234929b1c619ebada2abcfd8e688c116d92" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.020210 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c2fbb0868dda0fd77e32030ce0cb234929b1c619ebada2abcfd8e688c116d92"} err="failed to get container status \"1c2fbb0868dda0fd77e32030ce0cb234929b1c619ebada2abcfd8e688c116d92\": rpc error: code = NotFound desc = could not find container \"1c2fbb0868dda0fd77e32030ce0cb234929b1c619ebada2abcfd8e688c116d92\": container with ID starting with 1c2fbb0868dda0fd77e32030ce0cb234929b1c619ebada2abcfd8e688c116d92 not found: ID does not exist" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.235453 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.267316 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.277546 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:09:50 crc kubenswrapper[4682]: E1210 11:09:50.278005 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="proxy-httpd" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.278021 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="proxy-httpd" Dec 10 11:09:50 crc kubenswrapper[4682]: E1210 11:09:50.278038 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53824719-3472-4d94-be91-5a1f3176e34d" containerName="init" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.278045 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="53824719-3472-4d94-be91-5a1f3176e34d" containerName="init" Dec 10 11:09:50 crc kubenswrapper[4682]: E1210 11:09:50.278058 4682 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="ceilometer-notification-agent" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.278065 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="ceilometer-notification-agent" Dec 10 11:09:50 crc kubenswrapper[4682]: E1210 11:09:50.278075 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="ceilometer-central-agent" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.278080 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="ceilometer-central-agent" Dec 10 11:09:50 crc kubenswrapper[4682]: E1210 11:09:50.278096 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="sg-core" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.278101 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="sg-core" Dec 10 11:09:50 crc kubenswrapper[4682]: E1210 11:09:50.278115 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53824719-3472-4d94-be91-5a1f3176e34d" containerName="dnsmasq-dns" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.278121 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="53824719-3472-4d94-be91-5a1f3176e34d" containerName="dnsmasq-dns" Dec 10 11:09:50 crc kubenswrapper[4682]: E1210 11:09:50.278137 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b73064f-224e-4f76-9c6b-dba2d1f1dbd7" containerName="nova-manage" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.278143 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b73064f-224e-4f76-9c6b-dba2d1f1dbd7" containerName="nova-manage" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.278325 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="53824719-3472-4d94-be91-5a1f3176e34d" containerName="dnsmasq-dns" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.278339 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="proxy-httpd" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.278351 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="ceilometer-central-agent" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.278362 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b73064f-224e-4f76-9c6b-dba2d1f1dbd7" containerName="nova-manage" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.278374 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="ceilometer-notification-agent" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.278387 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" containerName="sg-core" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.280229 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.285117 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.285552 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.291353 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.293188 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.376568 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-config-data\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.376627 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-scripts\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.376653 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60fa05d8-74a9-4960-bbb8-ceed10ea183c-log-httpd\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.376699 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.376738 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.376760 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60fa05d8-74a9-4960-bbb8-ceed10ea183c-run-httpd\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.376820 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krmxh\" (UniqueName: \"kubernetes.io/projected/60fa05d8-74a9-4960-bbb8-ceed10ea183c-kube-api-access-krmxh\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.376903 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.395550 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53824719-3472-4d94-be91-5a1f3176e34d" path="/var/lib/kubelet/pods/53824719-3472-4d94-be91-5a1f3176e34d/volumes" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.396354 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1976646-8c73-4c29-a9e4-71527b1f3f61" path="/var/lib/kubelet/pods/b1976646-8c73-4c29-a9e4-71527b1f3f61/volumes" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.478702 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-config-data\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.478942 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-scripts\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.478961 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60fa05d8-74a9-4960-bbb8-ceed10ea183c-log-httpd\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.478980 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.479036 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.479053 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60fa05d8-74a9-4960-bbb8-ceed10ea183c-run-httpd\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.479098 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krmxh\" (UniqueName: \"kubernetes.io/projected/60fa05d8-74a9-4960-bbb8-ceed10ea183c-kube-api-access-krmxh\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.479118 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc 
kubenswrapper[4682]: I1210 11:09:50.479616 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60fa05d8-74a9-4960-bbb8-ceed10ea183c-log-httpd\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.481571 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60fa05d8-74a9-4960-bbb8-ceed10ea183c-run-httpd\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.482667 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.482878 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.483160 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-config-data\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.483321 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.490997 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-scripts\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.495783 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krmxh\" (UniqueName: \"kubernetes.io/projected/60fa05d8-74a9-4960-bbb8-ceed10ea183c-kube-api-access-krmxh\") pod \"ceilometer-0\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.617945 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.858978 4682 generic.go:334] "Generic (PLEG): container finished" podID="f574c330-8788-4fa7-9398-c0363d3ebcaa" containerID="9e472e45aa60c240978d1b9dda0c682ec865462a197ff12a75cf6648a43a1534" exitCode=0 Dec 10 11:09:50 crc kubenswrapper[4682]: I1210 11:09:50.859048 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f574c330-8788-4fa7-9398-c0363d3ebcaa","Type":"ContainerDied","Data":"9e472e45aa60c240978d1b9dda0c682ec865462a197ff12a75cf6648a43a1534"} Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.180340 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:09:51 crc kubenswrapper[4682]: W1210 11:09:51.235664 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod60fa05d8_74a9_4960_bbb8_ceed10ea183c.slice/crio-4e017f0d51cbbf661766c84579da1d7ea78586fc70830e2684e0a52d2e6821e8 WatchSource:0}: Error finding container 4e017f0d51cbbf661766c84579da1d7ea78586fc70830e2684e0a52d2e6821e8: Status 404 returned error can't find the container with id 4e017f0d51cbbf661766c84579da1d7ea78586fc70830e2684e0a52d2e6821e8 Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.239628 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.306045 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f574c330-8788-4fa7-9398-c0363d3ebcaa-config-data\") pod \"f574c330-8788-4fa7-9398-c0363d3ebcaa\" (UID: \"f574c330-8788-4fa7-9398-c0363d3ebcaa\") " Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.306083 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96hwt\" (UniqueName: \"kubernetes.io/projected/f574c330-8788-4fa7-9398-c0363d3ebcaa-kube-api-access-96hwt\") pod \"f574c330-8788-4fa7-9398-c0363d3ebcaa\" (UID: \"f574c330-8788-4fa7-9398-c0363d3ebcaa\") " Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.306145 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f574c330-8788-4fa7-9398-c0363d3ebcaa-combined-ca-bundle\") pod \"f574c330-8788-4fa7-9398-c0363d3ebcaa\" (UID: \"f574c330-8788-4fa7-9398-c0363d3ebcaa\") " Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.311870 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f574c330-8788-4fa7-9398-c0363d3ebcaa-kube-api-access-96hwt" (OuterVolumeSpecName: "kube-api-access-96hwt") pod "f574c330-8788-4fa7-9398-c0363d3ebcaa" (UID: "f574c330-8788-4fa7-9398-c0363d3ebcaa"). InnerVolumeSpecName "kube-api-access-96hwt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.324079 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.371515 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f574c330-8788-4fa7-9398-c0363d3ebcaa-config-data" (OuterVolumeSpecName: "config-data") pod "f574c330-8788-4fa7-9398-c0363d3ebcaa" (UID: "f574c330-8788-4fa7-9398-c0363d3ebcaa"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.388421 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f574c330-8788-4fa7-9398-c0363d3ebcaa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f574c330-8788-4fa7-9398-c0363d3ebcaa" (UID: "f574c330-8788-4fa7-9398-c0363d3ebcaa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.409192 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f574c330-8788-4fa7-9398-c0363d3ebcaa-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.409234 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96hwt\" (UniqueName: \"kubernetes.io/projected/f574c330-8788-4fa7-9398-c0363d3ebcaa-kube-api-access-96hwt\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.409250 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f574c330-8788-4fa7-9398-c0363d3ebcaa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.509962 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-combined-ca-bundle\") pod \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.510084 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-scripts\") pod \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.510195 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n8kjq\" (UniqueName: \"kubernetes.io/projected/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-kube-api-access-n8kjq\") pod \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.510311 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-config-data\") pod \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\" (UID: \"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e\") " Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.513750 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-scripts" (OuterVolumeSpecName: "scripts") pod "d3cd9ca1-7529-4458-a470-d3dfeed6ad9e" (UID: "d3cd9ca1-7529-4458-a470-d3dfeed6ad9e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.518625 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-kube-api-access-n8kjq" (OuterVolumeSpecName: "kube-api-access-n8kjq") pod "d3cd9ca1-7529-4458-a470-d3dfeed6ad9e" (UID: "d3cd9ca1-7529-4458-a470-d3dfeed6ad9e"). 
InnerVolumeSpecName "kube-api-access-n8kjq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.537221 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d3cd9ca1-7529-4458-a470-d3dfeed6ad9e" (UID: "d3cd9ca1-7529-4458-a470-d3dfeed6ad9e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.544589 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-config-data" (OuterVolumeSpecName: "config-data") pod "d3cd9ca1-7529-4458-a470-d3dfeed6ad9e" (UID: "d3cd9ca1-7529-4458-a470-d3dfeed6ad9e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.612961 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.612999 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.613011 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n8kjq\" (UniqueName: \"kubernetes.io/projected/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-kube-api-access-n8kjq\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.613024 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.914271 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.914266 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f574c330-8788-4fa7-9398-c0363d3ebcaa","Type":"ContainerDied","Data":"0a66b76e2a8ebbcf68d15ba961804558854eed55fcb67349c4ad3ba5c61fb240"} Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.914455 4682 scope.go:117] "RemoveContainer" containerID="9e472e45aa60c240978d1b9dda0c682ec865462a197ff12a75cf6648a43a1534" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.919417 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-277zt" event={"ID":"d3cd9ca1-7529-4458-a470-d3dfeed6ad9e","Type":"ContainerDied","Data":"43e789a56cb84fa7619f06fbaf05b931771d62ec758eab4932ce1f7b0448d1f2"} Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.919484 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="43e789a56cb84fa7619f06fbaf05b931771d62ec758eab4932ce1f7b0448d1f2" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.919554 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-277zt" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.934660 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60fa05d8-74a9-4960-bbb8-ceed10ea183c","Type":"ContainerStarted","Data":"4e017f0d51cbbf661766c84579da1d7ea78586fc70830e2684e0a52d2e6821e8"} Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.991326 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 10 11:09:51 crc kubenswrapper[4682]: E1210 11:09:51.991956 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f574c330-8788-4fa7-9398-c0363d3ebcaa" containerName="nova-scheduler-scheduler" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.991976 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="f574c330-8788-4fa7-9398-c0363d3ebcaa" containerName="nova-scheduler-scheduler" Dec 10 11:09:51 crc kubenswrapper[4682]: E1210 11:09:51.991995 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3cd9ca1-7529-4458-a470-d3dfeed6ad9e" containerName="nova-cell1-conductor-db-sync" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.992001 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3cd9ca1-7529-4458-a470-d3dfeed6ad9e" containerName="nova-cell1-conductor-db-sync" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.992189 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="f574c330-8788-4fa7-9398-c0363d3ebcaa" containerName="nova-scheduler-scheduler" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.992222 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3cd9ca1-7529-4458-a470-d3dfeed6ad9e" containerName="nova-cell1-conductor-db-sync" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.993040 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 10 11:09:51 crc kubenswrapper[4682]: I1210 11:09:51.995833 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.024596 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adedb4ee-2f85-464c-8a00-83a86ec2ad28-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"adedb4ee-2f85-464c-8a00-83a86ec2ad28\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.024808 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adedb4ee-2f85-464c-8a00-83a86ec2ad28-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"adedb4ee-2f85-464c-8a00-83a86ec2ad28\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.024860 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwzhq\" (UniqueName: \"kubernetes.io/projected/adedb4ee-2f85-464c-8a00-83a86ec2ad28-kube-api-access-hwzhq\") pod \"nova-cell1-conductor-0\" (UID: \"adedb4ee-2f85-464c-8a00-83a86ec2ad28\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.046855 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.060017 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.076044 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.085984 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.087602 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.090413 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.124039 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.126274 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adedb4ee-2f85-464c-8a00-83a86ec2ad28-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"adedb4ee-2f85-464c-8a00-83a86ec2ad28\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.126342 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.126393 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adedb4ee-2f85-464c-8a00-83a86ec2ad28-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"adedb4ee-2f85-464c-8a00-83a86ec2ad28\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.126419 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwzhq\" (UniqueName: \"kubernetes.io/projected/adedb4ee-2f85-464c-8a00-83a86ec2ad28-kube-api-access-hwzhq\") pod \"nova-cell1-conductor-0\" (UID: \"adedb4ee-2f85-464c-8a00-83a86ec2ad28\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.126451 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2l7gk\" (UniqueName: \"kubernetes.io/projected/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-kube-api-access-2l7gk\") pod \"nova-scheduler-0\" (UID: \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.126575 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-config-data\") pod \"nova-scheduler-0\" (UID: \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.132849 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adedb4ee-2f85-464c-8a00-83a86ec2ad28-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"adedb4ee-2f85-464c-8a00-83a86ec2ad28\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.134657 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adedb4ee-2f85-464c-8a00-83a86ec2ad28-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"adedb4ee-2f85-464c-8a00-83a86ec2ad28\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.147326 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-hwzhq\" (UniqueName: \"kubernetes.io/projected/adedb4ee-2f85-464c-8a00-83a86ec2ad28-kube-api-access-hwzhq\") pod \"nova-cell1-conductor-0\" (UID: \"adedb4ee-2f85-464c-8a00-83a86ec2ad28\") " pod="openstack/nova-cell1-conductor-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.229150 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.229439 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2l7gk\" (UniqueName: \"kubernetes.io/projected/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-kube-api-access-2l7gk\") pod \"nova-scheduler-0\" (UID: \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.229517 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-config-data\") pod \"nova-scheduler-0\" (UID: \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.233300 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-config-data\") pod \"nova-scheduler-0\" (UID: \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.234306 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.246163 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2l7gk\" (UniqueName: \"kubernetes.io/projected/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-kube-api-access-2l7gk\") pod \"nova-scheduler-0\" (UID: \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\") " pod="openstack/nova-scheduler-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.334186 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.394147 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f574c330-8788-4fa7-9398-c0363d3ebcaa" path="/var/lib/kubelet/pods/f574c330-8788-4fa7-9398-c0363d3ebcaa/volumes" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.418369 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.917245 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.956144 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"adedb4ee-2f85-464c-8a00-83a86ec2ad28","Type":"ContainerStarted","Data":"061eb4f19753923981b907440b175c8f698ba7607f8100f701d569a2df19d02e"} Dec 10 11:09:52 crc kubenswrapper[4682]: I1210 11:09:52.960004 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60fa05d8-74a9-4960-bbb8-ceed10ea183c","Type":"ContainerStarted","Data":"a6dbfb51611c94d322c7bae6d8c1d40bdda4c67b0b582d1c984b6f8c39362b41"} Dec 10 11:09:53 crc kubenswrapper[4682]: I1210 11:09:53.044551 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:09:53 crc kubenswrapper[4682]: W1210 11:09:53.051888 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod24421d2d_02fd_4c84_91ef_fbc5b8754a9f.slice/crio-6ddae70184a65aa8d7a894de7542e77c70007c45b54d8c8bdfd0d53e0cfb1f8a WatchSource:0}: Error finding container 6ddae70184a65aa8d7a894de7542e77c70007c45b54d8c8bdfd0d53e0cfb1f8a: Status 404 returned error can't find the container with id 6ddae70184a65aa8d7a894de7542e77c70007c45b54d8c8bdfd0d53e0cfb1f8a Dec 10 11:09:53 crc kubenswrapper[4682]: I1210 11:09:53.972007 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"adedb4ee-2f85-464c-8a00-83a86ec2ad28","Type":"ContainerStarted","Data":"5000aeebaa38d339cab229be9875ed55ed27e8410bf6e525216c61b38d4ce3e7"} Dec 10 11:09:53 crc kubenswrapper[4682]: I1210 11:09:53.972340 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Dec 10 11:09:53 crc kubenswrapper[4682]: I1210 11:09:53.973812 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"24421d2d-02fd-4c84-91ef-fbc5b8754a9f","Type":"ContainerStarted","Data":"2998187a8fe5f50e56e8c7333d72338ed1c51ff4c0b98d6c41f2bd11ffe43d90"} Dec 10 11:09:53 crc kubenswrapper[4682]: I1210 11:09:53.973850 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"24421d2d-02fd-4c84-91ef-fbc5b8754a9f","Type":"ContainerStarted","Data":"6ddae70184a65aa8d7a894de7542e77c70007c45b54d8c8bdfd0d53e0cfb1f8a"} Dec 10 11:09:54 crc kubenswrapper[4682]: I1210 11:09:54.005463 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=3.005441213 podStartE2EDuration="3.005441213s" podCreationTimestamp="2025-12-10 11:09:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:09:54.000143845 +0000 UTC m=+1474.320354605" watchObservedRunningTime="2025-12-10 11:09:54.005441213 +0000 UTC m=+1474.325651963" Dec 10 11:09:54 crc kubenswrapper[4682]: I1210 11:09:54.018584 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.018566956 podStartE2EDuration="2.018566956s" podCreationTimestamp="2025-12-10 11:09:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 
UTC" observedRunningTime="2025-12-10 11:09:54.016405351 +0000 UTC m=+1474.336616101" watchObservedRunningTime="2025-12-10 11:09:54.018566956 +0000 UTC m=+1474.338777706" Dec 10 11:09:54 crc kubenswrapper[4682]: I1210 11:09:54.930217 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 10 11:09:54 crc kubenswrapper[4682]: I1210 11:09:54.990075 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60fa05d8-74a9-4960-bbb8-ceed10ea183c","Type":"ContainerStarted","Data":"5ac4a25dcf93ec11108dd9f0ec5f60439dce3fab9f13522d2efcda0362e684ba"} Dec 10 11:09:54 crc kubenswrapper[4682]: I1210 11:09:54.990116 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60fa05d8-74a9-4960-bbb8-ceed10ea183c","Type":"ContainerStarted","Data":"aa662ab14934d742bef282d56dbc26dfca88ef97d1a6c3352cc0e18501cdbf87"} Dec 10 11:09:54 crc kubenswrapper[4682]: I1210 11:09:54.992572 4682 generic.go:334] "Generic (PLEG): container finished" podID="56e630ea-02d5-4057-b395-53a41202c858" containerID="8b344cdf6a3233319a4c244e955568cf9f7199d99eb2d625b97899058aacd773" exitCode=0 Dec 10 11:09:54 crc kubenswrapper[4682]: I1210 11:09:54.993605 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"56e630ea-02d5-4057-b395-53a41202c858","Type":"ContainerDied","Data":"8b344cdf6a3233319a4c244e955568cf9f7199d99eb2d625b97899058aacd773"} Dec 10 11:09:55 crc kubenswrapper[4682]: I1210 11:09:55.121142 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:09:55 crc kubenswrapper[4682]: I1210 11:09:55.301065 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56e630ea-02d5-4057-b395-53a41202c858-combined-ca-bundle\") pod \"56e630ea-02d5-4057-b395-53a41202c858\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " Dec 10 11:09:55 crc kubenswrapper[4682]: I1210 11:09:55.301255 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56e630ea-02d5-4057-b395-53a41202c858-logs\") pod \"56e630ea-02d5-4057-b395-53a41202c858\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " Dec 10 11:09:55 crc kubenswrapper[4682]: I1210 11:09:55.301324 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ds5qx\" (UniqueName: \"kubernetes.io/projected/56e630ea-02d5-4057-b395-53a41202c858-kube-api-access-ds5qx\") pod \"56e630ea-02d5-4057-b395-53a41202c858\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " Dec 10 11:09:55 crc kubenswrapper[4682]: I1210 11:09:55.301422 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56e630ea-02d5-4057-b395-53a41202c858-config-data\") pod \"56e630ea-02d5-4057-b395-53a41202c858\" (UID: \"56e630ea-02d5-4057-b395-53a41202c858\") " Dec 10 11:09:55 crc kubenswrapper[4682]: I1210 11:09:55.301869 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56e630ea-02d5-4057-b395-53a41202c858-logs" (OuterVolumeSpecName: "logs") pod "56e630ea-02d5-4057-b395-53a41202c858" (UID: "56e630ea-02d5-4057-b395-53a41202c858"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:09:55 crc kubenswrapper[4682]: I1210 11:09:55.307863 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56e630ea-02d5-4057-b395-53a41202c858-kube-api-access-ds5qx" (OuterVolumeSpecName: "kube-api-access-ds5qx") pod "56e630ea-02d5-4057-b395-53a41202c858" (UID: "56e630ea-02d5-4057-b395-53a41202c858"). InnerVolumeSpecName "kube-api-access-ds5qx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:09:55 crc kubenswrapper[4682]: I1210 11:09:55.333804 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56e630ea-02d5-4057-b395-53a41202c858-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "56e630ea-02d5-4057-b395-53a41202c858" (UID: "56e630ea-02d5-4057-b395-53a41202c858"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:55 crc kubenswrapper[4682]: I1210 11:09:55.346343 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56e630ea-02d5-4057-b395-53a41202c858-config-data" (OuterVolumeSpecName: "config-data") pod "56e630ea-02d5-4057-b395-53a41202c858" (UID: "56e630ea-02d5-4057-b395-53a41202c858"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:09:55 crc kubenswrapper[4682]: I1210 11:09:55.403293 4682 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56e630ea-02d5-4057-b395-53a41202c858-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:55 crc kubenswrapper[4682]: I1210 11:09:55.403329 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ds5qx\" (UniqueName: \"kubernetes.io/projected/56e630ea-02d5-4057-b395-53a41202c858-kube-api-access-ds5qx\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:55 crc kubenswrapper[4682]: I1210 11:09:55.403340 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56e630ea-02d5-4057-b395-53a41202c858-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:55 crc kubenswrapper[4682]: I1210 11:09:55.403349 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56e630ea-02d5-4057-b395-53a41202c858-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.004613 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"56e630ea-02d5-4057-b395-53a41202c858","Type":"ContainerDied","Data":"a80e1be0f20cd34809de595f1e371024c1a348fc58ce120ad6cf48172bce8b50"} Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.004929 4682 scope.go:117] "RemoveContainer" containerID="8b344cdf6a3233319a4c244e955568cf9f7199d99eb2d625b97899058aacd773" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.004692 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.042990 4682 scope.go:117] "RemoveContainer" containerID="473af833621c763dfd2f6f3c5da0d6a78d43ec36be003f233981287eaf8c9509" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.045284 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.056833 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.067565 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 10 11:09:56 crc kubenswrapper[4682]: E1210 11:09:56.068354 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56e630ea-02d5-4057-b395-53a41202c858" containerName="nova-api-api" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.068515 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="56e630ea-02d5-4057-b395-53a41202c858" containerName="nova-api-api" Dec 10 11:09:56 crc kubenswrapper[4682]: E1210 11:09:56.068651 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56e630ea-02d5-4057-b395-53a41202c858" containerName="nova-api-log" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.068746 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="56e630ea-02d5-4057-b395-53a41202c858" containerName="nova-api-log" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.069077 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="56e630ea-02d5-4057-b395-53a41202c858" containerName="nova-api-log" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.069186 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="56e630ea-02d5-4057-b395-53a41202c858" containerName="nova-api-api" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.072126 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.076030 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.091714 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.226186 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83387941-b169-462c-9fc2-0ccfa3a8ab49-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.226258 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vf75b\" (UniqueName: \"kubernetes.io/projected/83387941-b169-462c-9fc2-0ccfa3a8ab49-kube-api-access-vf75b\") pod \"nova-api-0\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.226333 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83387941-b169-462c-9fc2-0ccfa3a8ab49-logs\") pod \"nova-api-0\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.226384 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83387941-b169-462c-9fc2-0ccfa3a8ab49-config-data\") pod \"nova-api-0\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.327773 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83387941-b169-462c-9fc2-0ccfa3a8ab49-config-data\") pod \"nova-api-0\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.328234 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83387941-b169-462c-9fc2-0ccfa3a8ab49-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.328324 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vf75b\" (UniqueName: \"kubernetes.io/projected/83387941-b169-462c-9fc2-0ccfa3a8ab49-kube-api-access-vf75b\") pod \"nova-api-0\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.328445 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83387941-b169-462c-9fc2-0ccfa3a8ab49-logs\") pod \"nova-api-0\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.328946 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83387941-b169-462c-9fc2-0ccfa3a8ab49-logs\") pod \"nova-api-0\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " 
pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.336796 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83387941-b169-462c-9fc2-0ccfa3a8ab49-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.351418 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vf75b\" (UniqueName: \"kubernetes.io/projected/83387941-b169-462c-9fc2-0ccfa3a8ab49-kube-api-access-vf75b\") pod \"nova-api-0\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.353299 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83387941-b169-462c-9fc2-0ccfa3a8ab49-config-data\") pod \"nova-api-0\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.400572 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56e630ea-02d5-4057-b395-53a41202c858" path="/var/lib/kubelet/pods/56e630ea-02d5-4057-b395-53a41202c858/volumes" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.410736 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:09:56 crc kubenswrapper[4682]: I1210 11:09:56.885443 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:09:57 crc kubenswrapper[4682]: I1210 11:09:57.018698 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60fa05d8-74a9-4960-bbb8-ceed10ea183c","Type":"ContainerStarted","Data":"62bf16d3b8c287f5f99c4da1bdb49e96150ab1ed06e283c518b094c6760f95ec"} Dec 10 11:09:57 crc kubenswrapper[4682]: I1210 11:09:57.020265 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:09:57 crc kubenswrapper[4682]: I1210 11:09:57.024580 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"83387941-b169-462c-9fc2-0ccfa3a8ab49","Type":"ContainerStarted","Data":"7183866cd6fd61e6046b03367a6e9b247651e70f1e516ddd07955f350eee40f3"} Dec 10 11:09:57 crc kubenswrapper[4682]: I1210 11:09:57.046589 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.18513236 podStartE2EDuration="7.04656771s" podCreationTimestamp="2025-12-10 11:09:50 +0000 UTC" firstStartedPulling="2025-12-10 11:09:51.24246244 +0000 UTC m=+1471.562673180" lastFinishedPulling="2025-12-10 11:09:56.10389778 +0000 UTC m=+1476.424108530" observedRunningTime="2025-12-10 11:09:57.041377334 +0000 UTC m=+1477.361588114" watchObservedRunningTime="2025-12-10 11:09:57.04656771 +0000 UTC m=+1477.366778460" Dec 10 11:09:57 crc kubenswrapper[4682]: I1210 11:09:57.419368 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 10 11:09:58 crc kubenswrapper[4682]: I1210 11:09:58.040824 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"83387941-b169-462c-9fc2-0ccfa3a8ab49","Type":"ContainerStarted","Data":"cf050a2ccfed496febd3b61e3ae4d10fa9ae8785dcc16b0527864f3618d38e86"} Dec 10 11:09:58 crc kubenswrapper[4682]: I1210 11:09:58.040876 4682 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"83387941-b169-462c-9fc2-0ccfa3a8ab49","Type":"ContainerStarted","Data":"2f58aef5715388bf77ba0fbfe2265b9a64bb02b1d5be126496b94b678d3545ba"} Dec 10 11:09:58 crc kubenswrapper[4682]: I1210 11:09:58.061569 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.061547314 podStartE2EDuration="2.061547314s" podCreationTimestamp="2025-12-10 11:09:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:09:58.056214925 +0000 UTC m=+1478.376425675" watchObservedRunningTime="2025-12-10 11:09:58.061547314 +0000 UTC m=+1478.381758084" Dec 10 11:10:02 crc kubenswrapper[4682]: I1210 11:10:02.367596 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Dec 10 11:10:02 crc kubenswrapper[4682]: I1210 11:10:02.419526 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 10 11:10:02 crc kubenswrapper[4682]: I1210 11:10:02.448864 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 10 11:10:03 crc kubenswrapper[4682]: I1210 11:10:03.122305 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 10 11:10:06 crc kubenswrapper[4682]: I1210 11:10:06.411702 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 11:10:06 crc kubenswrapper[4682]: I1210 11:10:06.411737 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 11:10:07 crc kubenswrapper[4682]: I1210 11:10:07.497674 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="83387941-b169-462c-9fc2-0ccfa3a8ab49" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.216:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:10:07 crc kubenswrapper[4682]: I1210 11:10:07.497674 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="83387941-b169-462c-9fc2-0ccfa3a8ab49" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.216:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.120540 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.130604 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.197104 4682 generic.go:334] "Generic (PLEG): container finished" podID="6442c083-12c8-47f2-8a74-09443168bad0" containerID="94f2cca131c5f863aacee42cfe8b20216eb54a8cd61fd80a74da11adf19ca8cd" exitCode=137 Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.197157 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.197188 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"6442c083-12c8-47f2-8a74-09443168bad0","Type":"ContainerDied","Data":"94f2cca131c5f863aacee42cfe8b20216eb54a8cd61fd80a74da11adf19ca8cd"} Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.197245 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"6442c083-12c8-47f2-8a74-09443168bad0","Type":"ContainerDied","Data":"a1f4a9acdba28eab7fc83e8c15d1df8cf97fde1da9578f359e5b876bfe3ce2a5"} Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.197266 4682 scope.go:117] "RemoveContainer" containerID="94f2cca131c5f863aacee42cfe8b20216eb54a8cd61fd80a74da11adf19ca8cd" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.200312 4682 generic.go:334] "Generic (PLEG): container finished" podID="1f4ee09a-f13e-474b-a40d-662a22124fcf" containerID="8f7eaf1ead30c3d43211266a037fa5a8d0c2b80d81c10e3a347a6996ab23e628" exitCode=137 Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.200351 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1f4ee09a-f13e-474b-a40d-662a22124fcf","Type":"ContainerDied","Data":"8f7eaf1ead30c3d43211266a037fa5a8d0c2b80d81c10e3a347a6996ab23e628"} Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.200363 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.200377 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1f4ee09a-f13e-474b-a40d-662a22124fcf","Type":"ContainerDied","Data":"b86a0385c2867c2986a5636b7ecab40d41d1d40ace8b7249b26466c86f173149"} Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.227575 4682 scope.go:117] "RemoveContainer" containerID="94f2cca131c5f863aacee42cfe8b20216eb54a8cd61fd80a74da11adf19ca8cd" Dec 10 11:10:13 crc kubenswrapper[4682]: E1210 11:10:13.227984 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94f2cca131c5f863aacee42cfe8b20216eb54a8cd61fd80a74da11adf19ca8cd\": container with ID starting with 94f2cca131c5f863aacee42cfe8b20216eb54a8cd61fd80a74da11adf19ca8cd not found: ID does not exist" containerID="94f2cca131c5f863aacee42cfe8b20216eb54a8cd61fd80a74da11adf19ca8cd" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.228011 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94f2cca131c5f863aacee42cfe8b20216eb54a8cd61fd80a74da11adf19ca8cd"} err="failed to get container status \"94f2cca131c5f863aacee42cfe8b20216eb54a8cd61fd80a74da11adf19ca8cd\": rpc error: code = NotFound desc = could not find container \"94f2cca131c5f863aacee42cfe8b20216eb54a8cd61fd80a74da11adf19ca8cd\": container with ID starting with 94f2cca131c5f863aacee42cfe8b20216eb54a8cd61fd80a74da11adf19ca8cd not found: ID does not exist" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.228030 4682 scope.go:117] "RemoveContainer" containerID="8f7eaf1ead30c3d43211266a037fa5a8d0c2b80d81c10e3a347a6996ab23e628" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.248838 4682 scope.go:117] "RemoveContainer" containerID="d6652bca13780817c220c11b48b1e1f8c61df41e25f44de75cdf49409f3eeec7" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.267071 4682 
scope.go:117] "RemoveContainer" containerID="8f7eaf1ead30c3d43211266a037fa5a8d0c2b80d81c10e3a347a6996ab23e628" Dec 10 11:10:13 crc kubenswrapper[4682]: E1210 11:10:13.267632 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f7eaf1ead30c3d43211266a037fa5a8d0c2b80d81c10e3a347a6996ab23e628\": container with ID starting with 8f7eaf1ead30c3d43211266a037fa5a8d0c2b80d81c10e3a347a6996ab23e628 not found: ID does not exist" containerID="8f7eaf1ead30c3d43211266a037fa5a8d0c2b80d81c10e3a347a6996ab23e628" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.267678 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f7eaf1ead30c3d43211266a037fa5a8d0c2b80d81c10e3a347a6996ab23e628"} err="failed to get container status \"8f7eaf1ead30c3d43211266a037fa5a8d0c2b80d81c10e3a347a6996ab23e628\": rpc error: code = NotFound desc = could not find container \"8f7eaf1ead30c3d43211266a037fa5a8d0c2b80d81c10e3a347a6996ab23e628\": container with ID starting with 8f7eaf1ead30c3d43211266a037fa5a8d0c2b80d81c10e3a347a6996ab23e628 not found: ID does not exist" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.267715 4682 scope.go:117] "RemoveContainer" containerID="d6652bca13780817c220c11b48b1e1f8c61df41e25f44de75cdf49409f3eeec7" Dec 10 11:10:13 crc kubenswrapper[4682]: E1210 11:10:13.268142 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6652bca13780817c220c11b48b1e1f8c61df41e25f44de75cdf49409f3eeec7\": container with ID starting with d6652bca13780817c220c11b48b1e1f8c61df41e25f44de75cdf49409f3eeec7 not found: ID does not exist" containerID="d6652bca13780817c220c11b48b1e1f8c61df41e25f44de75cdf49409f3eeec7" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.268183 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6652bca13780817c220c11b48b1e1f8c61df41e25f44de75cdf49409f3eeec7"} err="failed to get container status \"d6652bca13780817c220c11b48b1e1f8c61df41e25f44de75cdf49409f3eeec7\": rpc error: code = NotFound desc = could not find container \"d6652bca13780817c220c11b48b1e1f8c61df41e25f44de75cdf49409f3eeec7\": container with ID starting with d6652bca13780817c220c11b48b1e1f8c61df41e25f44de75cdf49409f3eeec7 not found: ID does not exist" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.296813 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f4ee09a-f13e-474b-a40d-662a22124fcf-combined-ca-bundle\") pod \"1f4ee09a-f13e-474b-a40d-662a22124fcf\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.296881 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6442c083-12c8-47f2-8a74-09443168bad0-combined-ca-bundle\") pod \"6442c083-12c8-47f2-8a74-09443168bad0\" (UID: \"6442c083-12c8-47f2-8a74-09443168bad0\") " Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.296930 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1f4ee09a-f13e-474b-a40d-662a22124fcf-logs\") pod \"1f4ee09a-f13e-474b-a40d-662a22124fcf\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.296952 4682 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-krb8v\" (UniqueName: \"kubernetes.io/projected/1f4ee09a-f13e-474b-a40d-662a22124fcf-kube-api-access-krb8v\") pod \"1f4ee09a-f13e-474b-a40d-662a22124fcf\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.297042 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f4ee09a-f13e-474b-a40d-662a22124fcf-config-data\") pod \"1f4ee09a-f13e-474b-a40d-662a22124fcf\" (UID: \"1f4ee09a-f13e-474b-a40d-662a22124fcf\") " Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.297077 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2b9pq\" (UniqueName: \"kubernetes.io/projected/6442c083-12c8-47f2-8a74-09443168bad0-kube-api-access-2b9pq\") pod \"6442c083-12c8-47f2-8a74-09443168bad0\" (UID: \"6442c083-12c8-47f2-8a74-09443168bad0\") " Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.297115 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6442c083-12c8-47f2-8a74-09443168bad0-config-data\") pod \"6442c083-12c8-47f2-8a74-09443168bad0\" (UID: \"6442c083-12c8-47f2-8a74-09443168bad0\") " Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.297440 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f4ee09a-f13e-474b-a40d-662a22124fcf-logs" (OuterVolumeSpecName: "logs") pod "1f4ee09a-f13e-474b-a40d-662a22124fcf" (UID: "1f4ee09a-f13e-474b-a40d-662a22124fcf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.297931 4682 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1f4ee09a-f13e-474b-a40d-662a22124fcf-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.303569 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6442c083-12c8-47f2-8a74-09443168bad0-kube-api-access-2b9pq" (OuterVolumeSpecName: "kube-api-access-2b9pq") pod "6442c083-12c8-47f2-8a74-09443168bad0" (UID: "6442c083-12c8-47f2-8a74-09443168bad0"). InnerVolumeSpecName "kube-api-access-2b9pq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.311215 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f4ee09a-f13e-474b-a40d-662a22124fcf-kube-api-access-krb8v" (OuterVolumeSpecName: "kube-api-access-krb8v") pod "1f4ee09a-f13e-474b-a40d-662a22124fcf" (UID: "1f4ee09a-f13e-474b-a40d-662a22124fcf"). InnerVolumeSpecName "kube-api-access-krb8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.326282 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6442c083-12c8-47f2-8a74-09443168bad0-config-data" (OuterVolumeSpecName: "config-data") pod "6442c083-12c8-47f2-8a74-09443168bad0" (UID: "6442c083-12c8-47f2-8a74-09443168bad0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.327749 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f4ee09a-f13e-474b-a40d-662a22124fcf-config-data" (OuterVolumeSpecName: "config-data") pod "1f4ee09a-f13e-474b-a40d-662a22124fcf" (UID: "1f4ee09a-f13e-474b-a40d-662a22124fcf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.333401 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f4ee09a-f13e-474b-a40d-662a22124fcf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1f4ee09a-f13e-474b-a40d-662a22124fcf" (UID: "1f4ee09a-f13e-474b-a40d-662a22124fcf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.343394 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6442c083-12c8-47f2-8a74-09443168bad0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6442c083-12c8-47f2-8a74-09443168bad0" (UID: "6442c083-12c8-47f2-8a74-09443168bad0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.399582 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f4ee09a-f13e-474b-a40d-662a22124fcf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.399615 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6442c083-12c8-47f2-8a74-09443168bad0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.399625 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krb8v\" (UniqueName: \"kubernetes.io/projected/1f4ee09a-f13e-474b-a40d-662a22124fcf-kube-api-access-krb8v\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.399635 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f4ee09a-f13e-474b-a40d-662a22124fcf-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.399645 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2b9pq\" (UniqueName: \"kubernetes.io/projected/6442c083-12c8-47f2-8a74-09443168bad0-kube-api-access-2b9pq\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.399653 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6442c083-12c8-47f2-8a74-09443168bad0-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.558545 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.594430 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.617423 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.650729 4682 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.661657 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:10:13 crc kubenswrapper[4682]: E1210 11:10:13.662279 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f4ee09a-f13e-474b-a40d-662a22124fcf" containerName="nova-metadata-log" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.662301 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f4ee09a-f13e-474b-a40d-662a22124fcf" containerName="nova-metadata-log" Dec 10 11:10:13 crc kubenswrapper[4682]: E1210 11:10:13.662311 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f4ee09a-f13e-474b-a40d-662a22124fcf" containerName="nova-metadata-metadata" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.662319 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f4ee09a-f13e-474b-a40d-662a22124fcf" containerName="nova-metadata-metadata" Dec 10 11:10:13 crc kubenswrapper[4682]: E1210 11:10:13.662341 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6442c083-12c8-47f2-8a74-09443168bad0" containerName="nova-cell1-novncproxy-novncproxy" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.662352 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="6442c083-12c8-47f2-8a74-09443168bad0" containerName="nova-cell1-novncproxy-novncproxy" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.662633 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f4ee09a-f13e-474b-a40d-662a22124fcf" containerName="nova-metadata-log" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.662674 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f4ee09a-f13e-474b-a40d-662a22124fcf" containerName="nova-metadata-metadata" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.662689 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="6442c083-12c8-47f2-8a74-09443168bad0" containerName="nova-cell1-novncproxy-novncproxy" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.664162 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.671123 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.671324 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.678724 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.680625 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.684638 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.685841 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.692216 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.696580 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.707623 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.808960 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/160b22a6-2d74-4e00-ac9c-1c12f3af4190-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.809022 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/160b22a6-2d74-4e00-ac9c-1c12f3af4190-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.809054 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.809228 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fknkw\" (UniqueName: \"kubernetes.io/projected/160b22a6-2d74-4e00-ac9c-1c12f3af4190-kube-api-access-fknkw\") pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.809316 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66lh6\" (UniqueName: \"kubernetes.io/projected/2fce828a-4363-49ce-8faf-ca57ba6a67d3-kube-api-access-66lh6\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.809374 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2fce828a-4363-49ce-8faf-ca57ba6a67d3-logs\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.809573 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/160b22a6-2d74-4e00-ac9c-1c12f3af4190-vencrypt-tls-certs\") 
pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.809691 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-config-data\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.809838 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/160b22a6-2d74-4e00-ac9c-1c12f3af4190-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.809994 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.911665 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/160b22a6-2d74-4e00-ac9c-1c12f3af4190-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.911782 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.911873 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/160b22a6-2d74-4e00-ac9c-1c12f3af4190-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.911913 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/160b22a6-2d74-4e00-ac9c-1c12f3af4190-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.911948 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.912006 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fknkw\" (UniqueName: \"kubernetes.io/projected/160b22a6-2d74-4e00-ac9c-1c12f3af4190-kube-api-access-fknkw\") pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " 
pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.912053 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66lh6\" (UniqueName: \"kubernetes.io/projected/2fce828a-4363-49ce-8faf-ca57ba6a67d3-kube-api-access-66lh6\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.912093 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2fce828a-4363-49ce-8faf-ca57ba6a67d3-logs\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.912145 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/160b22a6-2d74-4e00-ac9c-1c12f3af4190-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.912225 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-config-data\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.912809 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2fce828a-4363-49ce-8faf-ca57ba6a67d3-logs\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.916307 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/160b22a6-2d74-4e00-ac9c-1c12f3af4190-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.916871 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/160b22a6-2d74-4e00-ac9c-1c12f3af4190-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.917358 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/160b22a6-2d74-4e00-ac9c-1c12f3af4190-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.918329 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-config-data\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.921011 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.921208 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/160b22a6-2d74-4e00-ac9c-1c12f3af4190-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.921884 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.935698 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fknkw\" (UniqueName: \"kubernetes.io/projected/160b22a6-2d74-4e00-ac9c-1c12f3af4190-kube-api-access-fknkw\") pod \"nova-cell1-novncproxy-0\" (UID: \"160b22a6-2d74-4e00-ac9c-1c12f3af4190\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:13 crc kubenswrapper[4682]: I1210 11:10:13.937947 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66lh6\" (UniqueName: \"kubernetes.io/projected/2fce828a-4363-49ce-8faf-ca57ba6a67d3-kube-api-access-66lh6\") pod \"nova-metadata-0\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " pod="openstack/nova-metadata-0" Dec 10 11:10:14 crc kubenswrapper[4682]: I1210 11:10:14.001879 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:10:14 crc kubenswrapper[4682]: I1210 11:10:14.015117 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:14 crc kubenswrapper[4682]: I1210 11:10:14.397873 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f4ee09a-f13e-474b-a40d-662a22124fcf" path="/var/lib/kubelet/pods/1f4ee09a-f13e-474b-a40d-662a22124fcf/volumes" Dec 10 11:10:14 crc kubenswrapper[4682]: I1210 11:10:14.399685 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6442c083-12c8-47f2-8a74-09443168bad0" path="/var/lib/kubelet/pods/6442c083-12c8-47f2-8a74-09443168bad0/volumes" Dec 10 11:10:14 crc kubenswrapper[4682]: W1210 11:10:14.508233 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fce828a_4363_49ce_8faf_ca57ba6a67d3.slice/crio-c38c22d39000ff89dc0b5ccea1f0ed5f44295091be36fe31fec4a8bd28cbe426 WatchSource:0}: Error finding container c38c22d39000ff89dc0b5ccea1f0ed5f44295091be36fe31fec4a8bd28cbe426: Status 404 returned error can't find the container with id c38c22d39000ff89dc0b5ccea1f0ed5f44295091be36fe31fec4a8bd28cbe426 Dec 10 11:10:14 crc kubenswrapper[4682]: I1210 11:10:14.508588 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:10:14 crc kubenswrapper[4682]: I1210 11:10:14.589199 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 11:10:15 crc kubenswrapper[4682]: I1210 11:10:15.232796 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2fce828a-4363-49ce-8faf-ca57ba6a67d3","Type":"ContainerStarted","Data":"6289f735c282abb3bdd732cb8d4a9e16afdf1d99c51caf607724227a5ad6f0c0"} Dec 10 11:10:15 crc kubenswrapper[4682]: I1210 11:10:15.233149 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2fce828a-4363-49ce-8faf-ca57ba6a67d3","Type":"ContainerStarted","Data":"13919ca103e0701c2c7b43f01142e4a8cf44288609964b6b509cab6a7c99518b"} Dec 10 11:10:15 crc kubenswrapper[4682]: I1210 11:10:15.233160 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2fce828a-4363-49ce-8faf-ca57ba6a67d3","Type":"ContainerStarted","Data":"c38c22d39000ff89dc0b5ccea1f0ed5f44295091be36fe31fec4a8bd28cbe426"} Dec 10 11:10:15 crc kubenswrapper[4682]: I1210 11:10:15.234505 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"160b22a6-2d74-4e00-ac9c-1c12f3af4190","Type":"ContainerStarted","Data":"b468c1da042530180f38225918f2554001f5945d2614be901b83f9edd5480187"} Dec 10 11:10:15 crc kubenswrapper[4682]: I1210 11:10:15.234529 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"160b22a6-2d74-4e00-ac9c-1c12f3af4190","Type":"ContainerStarted","Data":"c12cddba8c2751b906ddac88316ead29ca5ae394f37ae4897569cac7723f86ed"} Dec 10 11:10:15 crc kubenswrapper[4682]: I1210 11:10:15.256081 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.256064689 podStartE2EDuration="2.256064689s" podCreationTimestamp="2025-12-10 11:10:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:10:15.248403543 +0000 UTC m=+1495.568614293" watchObservedRunningTime="2025-12-10 11:10:15.256064689 +0000 UTC m=+1495.576275439" Dec 10 11:10:15 crc kubenswrapper[4682]: I1210 
11:10:15.282205 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.282183823 podStartE2EDuration="2.282183823s" podCreationTimestamp="2025-12-10 11:10:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:10:15.266773138 +0000 UTC m=+1495.586983948" watchObservedRunningTime="2025-12-10 11:10:15.282183823 +0000 UTC m=+1495.602394573" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.415560 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.416148 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.416965 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.417019 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.419003 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.419880 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.623641 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78468d7767-rx5lf"] Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.625504 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.636730 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78468d7767-rx5lf"] Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.774555 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4tsd\" (UniqueName: \"kubernetes.io/projected/a64a7fde-65b5-4376-ac93-deb06f0ceb93-kube-api-access-m4tsd\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.775035 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-config\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.775160 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-dns-svc\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.775283 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-dns-swift-storage-0\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: 
\"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.775364 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-ovsdbserver-nb\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.775506 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-ovsdbserver-sb\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.877399 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4tsd\" (UniqueName: \"kubernetes.io/projected/a64a7fde-65b5-4376-ac93-deb06f0ceb93-kube-api-access-m4tsd\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.877458 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-config\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.877530 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-dns-svc\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.877585 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-dns-swift-storage-0\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.877603 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-ovsdbserver-nb\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.877641 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-ovsdbserver-sb\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.878530 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-dns-svc\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " 
pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.878727 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-ovsdbserver-sb\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.878968 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-dns-swift-storage-0\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.879088 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-config\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.879357 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-ovsdbserver-nb\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.898129 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4tsd\" (UniqueName: \"kubernetes.io/projected/a64a7fde-65b5-4376-ac93-deb06f0ceb93-kube-api-access-m4tsd\") pod \"dnsmasq-dns-78468d7767-rx5lf\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:16 crc kubenswrapper[4682]: I1210 11:10:16.960303 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:17 crc kubenswrapper[4682]: I1210 11:10:17.453185 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78468d7767-rx5lf"] Dec 10 11:10:18 crc kubenswrapper[4682]: I1210 11:10:18.278714 4682 generic.go:334] "Generic (PLEG): container finished" podID="a64a7fde-65b5-4376-ac93-deb06f0ceb93" containerID="dfc36880e5be5501910b9050a9834872ce0210072959c36c1ea94a040bf2b50f" exitCode=0 Dec 10 11:10:18 crc kubenswrapper[4682]: I1210 11:10:18.278837 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78468d7767-rx5lf" event={"ID":"a64a7fde-65b5-4376-ac93-deb06f0ceb93","Type":"ContainerDied","Data":"dfc36880e5be5501910b9050a9834872ce0210072959c36c1ea94a040bf2b50f"} Dec 10 11:10:18 crc kubenswrapper[4682]: I1210 11:10:18.279297 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78468d7767-rx5lf" event={"ID":"a64a7fde-65b5-4376-ac93-deb06f0ceb93","Type":"ContainerStarted","Data":"517d22be77f5bd75b7377a82364799557a1a19cfedcc0356e41caa013b6ea254"} Dec 10 11:10:18 crc kubenswrapper[4682]: I1210 11:10:18.940975 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:10:18 crc kubenswrapper[4682]: I1210 11:10:18.941556 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="ceilometer-central-agent" containerID="cri-o://a6dbfb51611c94d322c7bae6d8c1d40bdda4c67b0b582d1c984b6f8c39362b41" gracePeriod=30 Dec 10 11:10:18 crc kubenswrapper[4682]: I1210 11:10:18.941582 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="sg-core" containerID="cri-o://5ac4a25dcf93ec11108dd9f0ec5f60439dce3fab9f13522d2efcda0362e684ba" gracePeriod=30 Dec 10 11:10:18 crc kubenswrapper[4682]: I1210 11:10:18.941618 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="proxy-httpd" containerID="cri-o://62bf16d3b8c287f5f99c4da1bdb49e96150ab1ed06e283c518b094c6760f95ec" gracePeriod=30 Dec 10 11:10:18 crc kubenswrapper[4682]: I1210 11:10:18.941710 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="ceilometer-notification-agent" containerID="cri-o://aa662ab14934d742bef282d56dbc26dfca88ef97d1a6c3352cc0e18501cdbf87" gracePeriod=30 Dec 10 11:10:18 crc kubenswrapper[4682]: I1210 11:10:18.949741 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.213:3000/\": read tcp 10.217.0.2:52096->10.217.0.213:3000: read: connection reset by peer" Dec 10 11:10:19 crc kubenswrapper[4682]: I1210 11:10:19.002620 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 10 11:10:19 crc kubenswrapper[4682]: I1210 11:10:19.002677 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 10 11:10:19 crc kubenswrapper[4682]: I1210 11:10:19.016200 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:19 crc 
kubenswrapper[4682]: I1210 11:10:19.181715 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:10:19 crc kubenswrapper[4682]: I1210 11:10:19.289635 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78468d7767-rx5lf" event={"ID":"a64a7fde-65b5-4376-ac93-deb06f0ceb93","Type":"ContainerStarted","Data":"a151ea25ced91cbe769907f17c8f42050d7782476cba4e2bea6c2bf5dcebad46"} Dec 10 11:10:19 crc kubenswrapper[4682]: I1210 11:10:19.289772 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:19 crc kubenswrapper[4682]: I1210 11:10:19.292716 4682 generic.go:334] "Generic (PLEG): container finished" podID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerID="62bf16d3b8c287f5f99c4da1bdb49e96150ab1ed06e283c518b094c6760f95ec" exitCode=0 Dec 10 11:10:19 crc kubenswrapper[4682]: I1210 11:10:19.292758 4682 generic.go:334] "Generic (PLEG): container finished" podID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerID="5ac4a25dcf93ec11108dd9f0ec5f60439dce3fab9f13522d2efcda0362e684ba" exitCode=2 Dec 10 11:10:19 crc kubenswrapper[4682]: I1210 11:10:19.292812 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60fa05d8-74a9-4960-bbb8-ceed10ea183c","Type":"ContainerDied","Data":"62bf16d3b8c287f5f99c4da1bdb49e96150ab1ed06e283c518b094c6760f95ec"} Dec 10 11:10:19 crc kubenswrapper[4682]: I1210 11:10:19.292849 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60fa05d8-74a9-4960-bbb8-ceed10ea183c","Type":"ContainerDied","Data":"5ac4a25dcf93ec11108dd9f0ec5f60439dce3fab9f13522d2efcda0362e684ba"} Dec 10 11:10:19 crc kubenswrapper[4682]: I1210 11:10:19.292954 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="83387941-b169-462c-9fc2-0ccfa3a8ab49" containerName="nova-api-log" containerID="cri-o://2f58aef5715388bf77ba0fbfe2265b9a64bb02b1d5be126496b94b678d3545ba" gracePeriod=30 Dec 10 11:10:19 crc kubenswrapper[4682]: I1210 11:10:19.293082 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="83387941-b169-462c-9fc2-0ccfa3a8ab49" containerName="nova-api-api" containerID="cri-o://cf050a2ccfed496febd3b61e3ae4d10fa9ae8785dcc16b0527864f3618d38e86" gracePeriod=30 Dec 10 11:10:19 crc kubenswrapper[4682]: I1210 11:10:19.329721 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-78468d7767-rx5lf" podStartSLOduration=3.3296966230000002 podStartE2EDuration="3.329696623s" podCreationTimestamp="2025-12-10 11:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:10:19.314048402 +0000 UTC m=+1499.634259172" watchObservedRunningTime="2025-12-10 11:10:19.329696623 +0000 UTC m=+1499.649907383" Dec 10 11:10:20 crc kubenswrapper[4682]: I1210 11:10:20.317140 4682 generic.go:334] "Generic (PLEG): container finished" podID="83387941-b169-462c-9fc2-0ccfa3a8ab49" containerID="2f58aef5715388bf77ba0fbfe2265b9a64bb02b1d5be126496b94b678d3545ba" exitCode=143 Dec 10 11:10:20 crc kubenswrapper[4682]: I1210 11:10:20.317421 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"83387941-b169-462c-9fc2-0ccfa3a8ab49","Type":"ContainerDied","Data":"2f58aef5715388bf77ba0fbfe2265b9a64bb02b1d5be126496b94b678d3545ba"} Dec 10 11:10:20 crc 
kubenswrapper[4682]: I1210 11:10:20.322801 4682 generic.go:334] "Generic (PLEG): container finished" podID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerID="a6dbfb51611c94d322c7bae6d8c1d40bdda4c67b0b582d1c984b6f8c39362b41" exitCode=0 Dec 10 11:10:20 crc kubenswrapper[4682]: I1210 11:10:20.322946 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60fa05d8-74a9-4960-bbb8-ceed10ea183c","Type":"ContainerDied","Data":"a6dbfb51611c94d322c7bae6d8c1d40bdda4c67b0b582d1c984b6f8c39362b41"} Dec 10 11:10:20 crc kubenswrapper[4682]: I1210 11:10:20.928600 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.107683 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-combined-ca-bundle\") pod \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.107969 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-ceilometer-tls-certs\") pod \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.108051 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-sg-core-conf-yaml\") pod \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.108136 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-config-data\") pod \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.108181 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krmxh\" (UniqueName: \"kubernetes.io/projected/60fa05d8-74a9-4960-bbb8-ceed10ea183c-kube-api-access-krmxh\") pod \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.108215 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-scripts\") pod \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.108295 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60fa05d8-74a9-4960-bbb8-ceed10ea183c-log-httpd\") pod \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.108341 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60fa05d8-74a9-4960-bbb8-ceed10ea183c-run-httpd\") pod \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\" (UID: \"60fa05d8-74a9-4960-bbb8-ceed10ea183c\") " Dec 10 11:10:21 crc 
kubenswrapper[4682]: I1210 11:10:21.108879 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60fa05d8-74a9-4960-bbb8-ceed10ea183c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "60fa05d8-74a9-4960-bbb8-ceed10ea183c" (UID: "60fa05d8-74a9-4960-bbb8-ceed10ea183c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.109018 4682 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60fa05d8-74a9-4960-bbb8-ceed10ea183c-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.109100 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60fa05d8-74a9-4960-bbb8-ceed10ea183c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "60fa05d8-74a9-4960-bbb8-ceed10ea183c" (UID: "60fa05d8-74a9-4960-bbb8-ceed10ea183c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.113558 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-scripts" (OuterVolumeSpecName: "scripts") pod "60fa05d8-74a9-4960-bbb8-ceed10ea183c" (UID: "60fa05d8-74a9-4960-bbb8-ceed10ea183c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.129699 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60fa05d8-74a9-4960-bbb8-ceed10ea183c-kube-api-access-krmxh" (OuterVolumeSpecName: "kube-api-access-krmxh") pod "60fa05d8-74a9-4960-bbb8-ceed10ea183c" (UID: "60fa05d8-74a9-4960-bbb8-ceed10ea183c"). InnerVolumeSpecName "kube-api-access-krmxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.144224 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "60fa05d8-74a9-4960-bbb8-ceed10ea183c" (UID: "60fa05d8-74a9-4960-bbb8-ceed10ea183c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.182631 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "60fa05d8-74a9-4960-bbb8-ceed10ea183c" (UID: "60fa05d8-74a9-4960-bbb8-ceed10ea183c"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.193554 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "60fa05d8-74a9-4960-bbb8-ceed10ea183c" (UID: "60fa05d8-74a9-4960-bbb8-ceed10ea183c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.211306 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krmxh\" (UniqueName: \"kubernetes.io/projected/60fa05d8-74a9-4960-bbb8-ceed10ea183c-kube-api-access-krmxh\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.211349 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.211363 4682 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/60fa05d8-74a9-4960-bbb8-ceed10ea183c-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.211375 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.211385 4682 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.211398 4682 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.239284 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-config-data" (OuterVolumeSpecName: "config-data") pod "60fa05d8-74a9-4960-bbb8-ceed10ea183c" (UID: "60fa05d8-74a9-4960-bbb8-ceed10ea183c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.313008 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60fa05d8-74a9-4960-bbb8-ceed10ea183c-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.342901 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.342920 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60fa05d8-74a9-4960-bbb8-ceed10ea183c","Type":"ContainerDied","Data":"aa662ab14934d742bef282d56dbc26dfca88ef97d1a6c3352cc0e18501cdbf87"} Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.342986 4682 scope.go:117] "RemoveContainer" containerID="62bf16d3b8c287f5f99c4da1bdb49e96150ab1ed06e283c518b094c6760f95ec" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.342767 4682 generic.go:334] "Generic (PLEG): container finished" podID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerID="aa662ab14934d742bef282d56dbc26dfca88ef97d1a6c3352cc0e18501cdbf87" exitCode=0 Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.343866 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"60fa05d8-74a9-4960-bbb8-ceed10ea183c","Type":"ContainerDied","Data":"4e017f0d51cbbf661766c84579da1d7ea78586fc70830e2684e0a52d2e6821e8"} Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.367891 4682 scope.go:117] "RemoveContainer" containerID="5ac4a25dcf93ec11108dd9f0ec5f60439dce3fab9f13522d2efcda0362e684ba" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.438674 4682 scope.go:117] "RemoveContainer" containerID="aa662ab14934d742bef282d56dbc26dfca88ef97d1a6c3352cc0e18501cdbf87" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.460685 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.467400 4682 scope.go:117] "RemoveContainer" containerID="a6dbfb51611c94d322c7bae6d8c1d40bdda4c67b0b582d1c984b6f8c39362b41" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.477052 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.493654 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:10:21 crc kubenswrapper[4682]: E1210 11:10:21.494205 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="ceilometer-notification-agent" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.494220 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="ceilometer-notification-agent" Dec 10 11:10:21 crc kubenswrapper[4682]: E1210 11:10:21.494243 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="ceilometer-central-agent" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.494253 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="ceilometer-central-agent" Dec 10 11:10:21 crc kubenswrapper[4682]: E1210 11:10:21.494266 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="sg-core" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.494274 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="sg-core" Dec 10 11:10:21 crc kubenswrapper[4682]: E1210 11:10:21.494284 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="proxy-httpd" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.494291 4682 
state_mem.go:107] "Deleted CPUSet assignment" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="proxy-httpd" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.494571 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="ceilometer-notification-agent" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.494601 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="proxy-httpd" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.494615 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="ceilometer-central-agent" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.494632 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="sg-core" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.496953 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.502104 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.503351 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.503420 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.505540 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.512388 4682 scope.go:117] "RemoveContainer" containerID="62bf16d3b8c287f5f99c4da1bdb49e96150ab1ed06e283c518b094c6760f95ec" Dec 10 11:10:21 crc kubenswrapper[4682]: E1210 11:10:21.512834 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62bf16d3b8c287f5f99c4da1bdb49e96150ab1ed06e283c518b094c6760f95ec\": container with ID starting with 62bf16d3b8c287f5f99c4da1bdb49e96150ab1ed06e283c518b094c6760f95ec not found: ID does not exist" containerID="62bf16d3b8c287f5f99c4da1bdb49e96150ab1ed06e283c518b094c6760f95ec" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.512867 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62bf16d3b8c287f5f99c4da1bdb49e96150ab1ed06e283c518b094c6760f95ec"} err="failed to get container status \"62bf16d3b8c287f5f99c4da1bdb49e96150ab1ed06e283c518b094c6760f95ec\": rpc error: code = NotFound desc = could not find container \"62bf16d3b8c287f5f99c4da1bdb49e96150ab1ed06e283c518b094c6760f95ec\": container with ID starting with 62bf16d3b8c287f5f99c4da1bdb49e96150ab1ed06e283c518b094c6760f95ec not found: ID does not exist" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.512897 4682 scope.go:117] "RemoveContainer" containerID="5ac4a25dcf93ec11108dd9f0ec5f60439dce3fab9f13522d2efcda0362e684ba" Dec 10 11:10:21 crc kubenswrapper[4682]: E1210 11:10:21.514665 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ac4a25dcf93ec11108dd9f0ec5f60439dce3fab9f13522d2efcda0362e684ba\": container with ID starting with 5ac4a25dcf93ec11108dd9f0ec5f60439dce3fab9f13522d2efcda0362e684ba not found: ID does 
not exist" containerID="5ac4a25dcf93ec11108dd9f0ec5f60439dce3fab9f13522d2efcda0362e684ba" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.514730 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ac4a25dcf93ec11108dd9f0ec5f60439dce3fab9f13522d2efcda0362e684ba"} err="failed to get container status \"5ac4a25dcf93ec11108dd9f0ec5f60439dce3fab9f13522d2efcda0362e684ba\": rpc error: code = NotFound desc = could not find container \"5ac4a25dcf93ec11108dd9f0ec5f60439dce3fab9f13522d2efcda0362e684ba\": container with ID starting with 5ac4a25dcf93ec11108dd9f0ec5f60439dce3fab9f13522d2efcda0362e684ba not found: ID does not exist" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.514764 4682 scope.go:117] "RemoveContainer" containerID="aa662ab14934d742bef282d56dbc26dfca88ef97d1a6c3352cc0e18501cdbf87" Dec 10 11:10:21 crc kubenswrapper[4682]: E1210 11:10:21.515173 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa662ab14934d742bef282d56dbc26dfca88ef97d1a6c3352cc0e18501cdbf87\": container with ID starting with aa662ab14934d742bef282d56dbc26dfca88ef97d1a6c3352cc0e18501cdbf87 not found: ID does not exist" containerID="aa662ab14934d742bef282d56dbc26dfca88ef97d1a6c3352cc0e18501cdbf87" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.515193 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa662ab14934d742bef282d56dbc26dfca88ef97d1a6c3352cc0e18501cdbf87"} err="failed to get container status \"aa662ab14934d742bef282d56dbc26dfca88ef97d1a6c3352cc0e18501cdbf87\": rpc error: code = NotFound desc = could not find container \"aa662ab14934d742bef282d56dbc26dfca88ef97d1a6c3352cc0e18501cdbf87\": container with ID starting with aa662ab14934d742bef282d56dbc26dfca88ef97d1a6c3352cc0e18501cdbf87 not found: ID does not exist" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.515206 4682 scope.go:117] "RemoveContainer" containerID="a6dbfb51611c94d322c7bae6d8c1d40bdda4c67b0b582d1c984b6f8c39362b41" Dec 10 11:10:21 crc kubenswrapper[4682]: E1210 11:10:21.515408 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6dbfb51611c94d322c7bae6d8c1d40bdda4c67b0b582d1c984b6f8c39362b41\": container with ID starting with a6dbfb51611c94d322c7bae6d8c1d40bdda4c67b0b582d1c984b6f8c39362b41 not found: ID does not exist" containerID="a6dbfb51611c94d322c7bae6d8c1d40bdda4c67b0b582d1c984b6f8c39362b41" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.515434 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6dbfb51611c94d322c7bae6d8c1d40bdda4c67b0b582d1c984b6f8c39362b41"} err="failed to get container status \"a6dbfb51611c94d322c7bae6d8c1d40bdda4c67b0b582d1c984b6f8c39362b41\": rpc error: code = NotFound desc = could not find container \"a6dbfb51611c94d322c7bae6d8c1d40bdda4c67b0b582d1c984b6f8c39362b41\": container with ID starting with a6dbfb51611c94d322c7bae6d8c1d40bdda4c67b0b582d1c984b6f8c39362b41 not found: ID does not exist" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.620519 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4f238a47-7afd-494d-80d2-1eed26cdb0cc-run-httpd\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc 
kubenswrapper[4682]: I1210 11:10:21.620760 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-config-data\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.620792 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4f238a47-7afd-494d-80d2-1eed26cdb0cc-log-httpd\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.620856 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtrnt\" (UniqueName: \"kubernetes.io/projected/4f238a47-7afd-494d-80d2-1eed26cdb0cc-kube-api-access-xtrnt\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.621070 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.621166 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.621196 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-scripts\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.621304 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.723324 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.723393 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.723421 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-scripts\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.723461 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.723568 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4f238a47-7afd-494d-80d2-1eed26cdb0cc-run-httpd\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.723673 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-config-data\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.723694 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4f238a47-7afd-494d-80d2-1eed26cdb0cc-log-httpd\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.723728 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtrnt\" (UniqueName: \"kubernetes.io/projected/4f238a47-7afd-494d-80d2-1eed26cdb0cc-kube-api-access-xtrnt\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.724138 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4f238a47-7afd-494d-80d2-1eed26cdb0cc-run-httpd\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.724331 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4f238a47-7afd-494d-80d2-1eed26cdb0cc-log-httpd\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.729820 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.730074 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.730316 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-scripts\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.730574 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-config-data\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.734667 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.742993 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtrnt\" (UniqueName: \"kubernetes.io/projected/4f238a47-7afd-494d-80d2-1eed26cdb0cc-kube-api-access-xtrnt\") pod \"ceilometer-0\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " pod="openstack/ceilometer-0" Dec 10 11:10:21 crc kubenswrapper[4682]: I1210 11:10:21.815817 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:10:22 crc kubenswrapper[4682]: I1210 11:10:22.258417 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:10:22 crc kubenswrapper[4682]: W1210 11:10:22.259208 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f238a47_7afd_494d_80d2_1eed26cdb0cc.slice/crio-c422b6bc1f0850356395720a5366acc89daab4fa3a8271b7ede4a8e8312c3781 WatchSource:0}: Error finding container c422b6bc1f0850356395720a5366acc89daab4fa3a8271b7ede4a8e8312c3781: Status 404 returned error can't find the container with id c422b6bc1f0850356395720a5366acc89daab4fa3a8271b7ede4a8e8312c3781 Dec 10 11:10:22 crc kubenswrapper[4682]: I1210 11:10:22.356706 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4f238a47-7afd-494d-80d2-1eed26cdb0cc","Type":"ContainerStarted","Data":"c422b6bc1f0850356395720a5366acc89daab4fa3a8271b7ede4a8e8312c3781"} Dec 10 11:10:22 crc kubenswrapper[4682]: I1210 11:10:22.392813 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" path="/var/lib/kubelet/pods/60fa05d8-74a9-4960-bbb8-ceed10ea183c/volumes" Dec 10 11:10:22 crc kubenswrapper[4682]: I1210 11:10:22.960261 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.059533 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vf75b\" (UniqueName: \"kubernetes.io/projected/83387941-b169-462c-9fc2-0ccfa3a8ab49-kube-api-access-vf75b\") pod \"83387941-b169-462c-9fc2-0ccfa3a8ab49\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.059660 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83387941-b169-462c-9fc2-0ccfa3a8ab49-logs\") pod \"83387941-b169-462c-9fc2-0ccfa3a8ab49\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.059717 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83387941-b169-462c-9fc2-0ccfa3a8ab49-combined-ca-bundle\") pod \"83387941-b169-462c-9fc2-0ccfa3a8ab49\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.059800 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83387941-b169-462c-9fc2-0ccfa3a8ab49-config-data\") pod \"83387941-b169-462c-9fc2-0ccfa3a8ab49\" (UID: \"83387941-b169-462c-9fc2-0ccfa3a8ab49\") " Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.060044 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83387941-b169-462c-9fc2-0ccfa3a8ab49-logs" (OuterVolumeSpecName: "logs") pod "83387941-b169-462c-9fc2-0ccfa3a8ab49" (UID: "83387941-b169-462c-9fc2-0ccfa3a8ab49"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.060553 4682 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83387941-b169-462c-9fc2-0ccfa3a8ab49-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.079641 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83387941-b169-462c-9fc2-0ccfa3a8ab49-kube-api-access-vf75b" (OuterVolumeSpecName: "kube-api-access-vf75b") pod "83387941-b169-462c-9fc2-0ccfa3a8ab49" (UID: "83387941-b169-462c-9fc2-0ccfa3a8ab49"). InnerVolumeSpecName "kube-api-access-vf75b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.092723 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83387941-b169-462c-9fc2-0ccfa3a8ab49-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "83387941-b169-462c-9fc2-0ccfa3a8ab49" (UID: "83387941-b169-462c-9fc2-0ccfa3a8ab49"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.107504 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83387941-b169-462c-9fc2-0ccfa3a8ab49-config-data" (OuterVolumeSpecName: "config-data") pod "83387941-b169-462c-9fc2-0ccfa3a8ab49" (UID: "83387941-b169-462c-9fc2-0ccfa3a8ab49"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.162600 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vf75b\" (UniqueName: \"kubernetes.io/projected/83387941-b169-462c-9fc2-0ccfa3a8ab49-kube-api-access-vf75b\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.162630 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83387941-b169-462c-9fc2-0ccfa3a8ab49-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.162639 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83387941-b169-462c-9fc2-0ccfa3a8ab49-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.372998 4682 generic.go:334] "Generic (PLEG): container finished" podID="83387941-b169-462c-9fc2-0ccfa3a8ab49" containerID="cf050a2ccfed496febd3b61e3ae4d10fa9ae8785dcc16b0527864f3618d38e86" exitCode=0 Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.373074 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"83387941-b169-462c-9fc2-0ccfa3a8ab49","Type":"ContainerDied","Data":"cf050a2ccfed496febd3b61e3ae4d10fa9ae8785dcc16b0527864f3618d38e86"} Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.373403 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"83387941-b169-462c-9fc2-0ccfa3a8ab49","Type":"ContainerDied","Data":"7183866cd6fd61e6046b03367a6e9b247651e70f1e516ddd07955f350eee40f3"} Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.373432 4682 scope.go:117] "RemoveContainer" containerID="cf050a2ccfed496febd3b61e3ae4d10fa9ae8785dcc16b0527864f3618d38e86" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.373127 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.436177 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.448745 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.461443 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 10 11:10:23 crc kubenswrapper[4682]: E1210 11:10:23.462036 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83387941-b169-462c-9fc2-0ccfa3a8ab49" containerName="nova-api-api" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.462057 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="83387941-b169-462c-9fc2-0ccfa3a8ab49" containerName="nova-api-api" Dec 10 11:10:23 crc kubenswrapper[4682]: E1210 11:10:23.462087 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83387941-b169-462c-9fc2-0ccfa3a8ab49" containerName="nova-api-log" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.462094 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="83387941-b169-462c-9fc2-0ccfa3a8ab49" containerName="nova-api-log" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.462277 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="83387941-b169-462c-9fc2-0ccfa3a8ab49" containerName="nova-api-log" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.462294 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="83387941-b169-462c-9fc2-0ccfa3a8ab49" containerName="nova-api-api" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.463387 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.465661 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.465864 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.472961 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.473090 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.569531 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-config-data\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.569620 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4dr8\" (UniqueName: \"kubernetes.io/projected/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-kube-api-access-g4dr8\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.569782 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-internal-tls-certs\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.569882 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-public-tls-certs\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.569903 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-logs\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.569943 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.628873 4682 scope.go:117] "RemoveContainer" containerID="2f58aef5715388bf77ba0fbfe2265b9a64bb02b1d5be126496b94b678d3545ba" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.669864 4682 scope.go:117] "RemoveContainer" containerID="cf050a2ccfed496febd3b61e3ae4d10fa9ae8785dcc16b0527864f3618d38e86" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.671132 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-internal-tls-certs\") pod 
\"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.671209 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-logs\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.671231 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-public-tls-certs\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.671281 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.671414 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4dr8\" (UniqueName: \"kubernetes.io/projected/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-kube-api-access-g4dr8\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.671460 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-config-data\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: E1210 11:10:23.671545 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf050a2ccfed496febd3b61e3ae4d10fa9ae8785dcc16b0527864f3618d38e86\": container with ID starting with cf050a2ccfed496febd3b61e3ae4d10fa9ae8785dcc16b0527864f3618d38e86 not found: ID does not exist" containerID="cf050a2ccfed496febd3b61e3ae4d10fa9ae8785dcc16b0527864f3618d38e86" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.671626 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf050a2ccfed496febd3b61e3ae4d10fa9ae8785dcc16b0527864f3618d38e86"} err="failed to get container status \"cf050a2ccfed496febd3b61e3ae4d10fa9ae8785dcc16b0527864f3618d38e86\": rpc error: code = NotFound desc = could not find container \"cf050a2ccfed496febd3b61e3ae4d10fa9ae8785dcc16b0527864f3618d38e86\": container with ID starting with cf050a2ccfed496febd3b61e3ae4d10fa9ae8785dcc16b0527864f3618d38e86 not found: ID does not exist" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.671670 4682 scope.go:117] "RemoveContainer" containerID="2f58aef5715388bf77ba0fbfe2265b9a64bb02b1d5be126496b94b678d3545ba" Dec 10 11:10:23 crc kubenswrapper[4682]: E1210 11:10:23.671928 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f58aef5715388bf77ba0fbfe2265b9a64bb02b1d5be126496b94b678d3545ba\": container with ID starting with 2f58aef5715388bf77ba0fbfe2265b9a64bb02b1d5be126496b94b678d3545ba not found: ID does not exist" containerID="2f58aef5715388bf77ba0fbfe2265b9a64bb02b1d5be126496b94b678d3545ba" Dec 
10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.671971 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f58aef5715388bf77ba0fbfe2265b9a64bb02b1d5be126496b94b678d3545ba"} err="failed to get container status \"2f58aef5715388bf77ba0fbfe2265b9a64bb02b1d5be126496b94b678d3545ba\": rpc error: code = NotFound desc = could not find container \"2f58aef5715388bf77ba0fbfe2265b9a64bb02b1d5be126496b94b678d3545ba\": container with ID starting with 2f58aef5715388bf77ba0fbfe2265b9a64bb02b1d5be126496b94b678d3545ba not found: ID does not exist" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.673760 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-logs\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.677981 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-internal-tls-certs\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.678267 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.678595 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-config-data\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.678821 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-public-tls-certs\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.701696 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4dr8\" (UniqueName: \"kubernetes.io/projected/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-kube-api-access-g4dr8\") pod \"nova-api-0\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " pod="openstack/nova-api-0" Dec 10 11:10:23 crc kubenswrapper[4682]: I1210 11:10:23.782452 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.002990 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.003318 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.015686 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.068582 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.359204 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:10:24 crc kubenswrapper[4682]: W1210 11:10:24.361810 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod89bd5736_7cac_4956_bd2d_1fe4fccdbeaa.slice/crio-c39f429ac4bae62a26a776dc325811560b3200204301407329c45e89c79a8654 WatchSource:0}: Error finding container c39f429ac4bae62a26a776dc325811560b3200204301407329c45e89c79a8654: Status 404 returned error can't find the container with id c39f429ac4bae62a26a776dc325811560b3200204301407329c45e89c79a8654 Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.406360 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83387941-b169-462c-9fc2-0ccfa3a8ab49" path="/var/lib/kubelet/pods/83387941-b169-462c-9fc2-0ccfa3a8ab49/volumes" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.407143 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4f238a47-7afd-494d-80d2-1eed26cdb0cc","Type":"ContainerStarted","Data":"f81a8e442f780c8d84f189b3b7a1ccfb204da7ebad172398273e73fc598eb533"} Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.407170 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa","Type":"ContainerStarted","Data":"c39f429ac4bae62a26a776dc325811560b3200204301407329c45e89c79a8654"} Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.420774 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.608214 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-bnl68"] Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.610539 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.614120 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.614368 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.619710 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-bnl68"] Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.805836 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-config-data\") pod \"nova-cell1-cell-mapping-bnl68\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.805879 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrnw9\" (UniqueName: \"kubernetes.io/projected/34d52064-5f24-4cc9-ad72-c04f77d892bf-kube-api-access-mrnw9\") pod \"nova-cell1-cell-mapping-bnl68\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.805907 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-scripts\") pod \"nova-cell1-cell-mapping-bnl68\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.806062 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-bnl68\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.907989 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-config-data\") pod \"nova-cell1-cell-mapping-bnl68\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.908027 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrnw9\" (UniqueName: \"kubernetes.io/projected/34d52064-5f24-4cc9-ad72-c04f77d892bf-kube-api-access-mrnw9\") pod \"nova-cell1-cell-mapping-bnl68\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.908050 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-scripts\") pod \"nova-cell1-cell-mapping-bnl68\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.908091 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-bnl68\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.915291 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-bnl68\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.915236 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-scripts\") pod \"nova-cell1-cell-mapping-bnl68\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.915839 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-config-data\") pod \"nova-cell1-cell-mapping-bnl68\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.930368 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrnw9\" (UniqueName: \"kubernetes.io/projected/34d52064-5f24-4cc9-ad72-c04f77d892bf-kube-api-access-mrnw9\") pod \"nova-cell1-cell-mapping-bnl68\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:24 crc kubenswrapper[4682]: I1210 11:10:24.998265 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:25 crc kubenswrapper[4682]: I1210 11:10:25.029600 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="2fce828a-4363-49ce-8faf-ca57ba6a67d3" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.217:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 11:10:25 crc kubenswrapper[4682]: I1210 11:10:25.029629 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="2fce828a-4363-49ce-8faf-ca57ba6a67d3" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.217:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 11:10:25 crc kubenswrapper[4682]: I1210 11:10:25.415836 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4f238a47-7afd-494d-80d2-1eed26cdb0cc","Type":"ContainerStarted","Data":"0ac738c65821df52285af23cd35d5ba1806fa6635999d4b870563761d9370119"} Dec 10 11:10:25 crc kubenswrapper[4682]: I1210 11:10:25.419016 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa","Type":"ContainerStarted","Data":"579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece"} Dec 10 11:10:25 crc kubenswrapper[4682]: I1210 11:10:25.419066 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa","Type":"ContainerStarted","Data":"c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68"} Dec 10 11:10:25 crc kubenswrapper[4682]: I1210 11:10:25.524586 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.52455141 podStartE2EDuration="2.52455141s" podCreationTimestamp="2025-12-10 11:10:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:10:25.449585004 +0000 UTC m=+1505.769795764" watchObservedRunningTime="2025-12-10 11:10:25.52455141 +0000 UTC m=+1505.844762170" Dec 10 11:10:25 crc kubenswrapper[4682]: I1210 11:10:25.545020 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-bnl68"] Dec 10 11:10:26 crc kubenswrapper[4682]: I1210 11:10:26.431523 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-bnl68" event={"ID":"34d52064-5f24-4cc9-ad72-c04f77d892bf","Type":"ContainerStarted","Data":"ff3e928170eb52aa6c63146bd59612247767f89d8057247a6b538918b7f6e802"} Dec 10 11:10:26 crc kubenswrapper[4682]: I1210 11:10:26.431883 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-bnl68" event={"ID":"34d52064-5f24-4cc9-ad72-c04f77d892bf","Type":"ContainerStarted","Data":"e6d43bc3ea730ce17de0b6b5e33525c4e929dbb030b194718d1fbd23ec595375"} Dec 10 11:10:26 crc kubenswrapper[4682]: I1210 11:10:26.434163 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4f238a47-7afd-494d-80d2-1eed26cdb0cc","Type":"ContainerStarted","Data":"f8cbb182c7db499799396a4d869ffaf89e95422a86bb24b722f57f155a176374"} Dec 10 11:10:26 crc kubenswrapper[4682]: I1210 11:10:26.453531 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-bnl68" 
podStartSLOduration=2.453511097 podStartE2EDuration="2.453511097s" podCreationTimestamp="2025-12-10 11:10:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:10:26.446305675 +0000 UTC m=+1506.766516425" watchObservedRunningTime="2025-12-10 11:10:26.453511097 +0000 UTC m=+1506.773721847" Dec 10 11:10:26 crc kubenswrapper[4682]: I1210 11:10:26.962500 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:10:27 crc kubenswrapper[4682]: I1210 11:10:27.033814 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c9cb78d75-d48lq"] Dec 10 11:10:27 crc kubenswrapper[4682]: I1210 11:10:27.034132 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" podUID="c8ba79fa-4920-44e2-950b-c7b6499595c0" containerName="dnsmasq-dns" containerID="cri-o://3ec62a3bda5f29dfe0062eeb3cbf5c24afc993e135ab86d1eaf7cdc60db7a136" gracePeriod=10 Dec 10 11:10:27 crc kubenswrapper[4682]: I1210 11:10:27.228564 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" podUID="c8ba79fa-4920-44e2-950b-c7b6499595c0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.210:5353: connect: connection refused" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.491802 4682 generic.go:334] "Generic (PLEG): container finished" podID="c8ba79fa-4920-44e2-950b-c7b6499595c0" containerID="3ec62a3bda5f29dfe0062eeb3cbf5c24afc993e135ab86d1eaf7cdc60db7a136" exitCode=0 Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.492650 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" event={"ID":"c8ba79fa-4920-44e2-950b-c7b6499595c0","Type":"ContainerDied","Data":"3ec62a3bda5f29dfe0062eeb3cbf5c24afc993e135ab86d1eaf7cdc60db7a136"} Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.792564 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.881395 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-ovsdbserver-sb\") pod \"c8ba79fa-4920-44e2-950b-c7b6499595c0\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.881446 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-dns-svc\") pod \"c8ba79fa-4920-44e2-950b-c7b6499595c0\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.881504 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdmln\" (UniqueName: \"kubernetes.io/projected/c8ba79fa-4920-44e2-950b-c7b6499595c0-kube-api-access-tdmln\") pod \"c8ba79fa-4920-44e2-950b-c7b6499595c0\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.881542 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-config\") pod \"c8ba79fa-4920-44e2-950b-c7b6499595c0\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.881563 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-ovsdbserver-nb\") pod \"c8ba79fa-4920-44e2-950b-c7b6499595c0\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.881579 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-dns-swift-storage-0\") pod \"c8ba79fa-4920-44e2-950b-c7b6499595c0\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.930541 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8ba79fa-4920-44e2-950b-c7b6499595c0-kube-api-access-tdmln" (OuterVolumeSpecName: "kube-api-access-tdmln") pod "c8ba79fa-4920-44e2-950b-c7b6499595c0" (UID: "c8ba79fa-4920-44e2-950b-c7b6499595c0"). InnerVolumeSpecName "kube-api-access-tdmln". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.955861 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c8ba79fa-4920-44e2-950b-c7b6499595c0" (UID: "c8ba79fa-4920-44e2-950b-c7b6499595c0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.982993 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c8ba79fa-4920-44e2-950b-c7b6499595c0" (UID: "c8ba79fa-4920-44e2-950b-c7b6499595c0"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.983295 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-ovsdbserver-sb\") pod \"c8ba79fa-4920-44e2-950b-c7b6499595c0\" (UID: \"c8ba79fa-4920-44e2-950b-c7b6499595c0\") " Dec 10 11:10:28 crc kubenswrapper[4682]: W1210 11:10:27.983898 4682 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/c8ba79fa-4920-44e2-950b-c7b6499595c0/volumes/kubernetes.io~configmap/ovsdbserver-sb Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.983952 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c8ba79fa-4920-44e2-950b-c7b6499595c0" (UID: "c8ba79fa-4920-44e2-950b-c7b6499595c0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.984574 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.984595 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdmln\" (UniqueName: \"kubernetes.io/projected/c8ba79fa-4920-44e2-950b-c7b6499595c0-kube-api-access-tdmln\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.984618 4682 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.985181 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c8ba79fa-4920-44e2-950b-c7b6499595c0" (UID: "c8ba79fa-4920-44e2-950b-c7b6499595c0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:27.999226 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c8ba79fa-4920-44e2-950b-c7b6499595c0" (UID: "c8ba79fa-4920-44e2-950b-c7b6499595c0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:28.001152 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-config" (OuterVolumeSpecName: "config") pod "c8ba79fa-4920-44e2-950b-c7b6499595c0" (UID: "c8ba79fa-4920-44e2-950b-c7b6499595c0"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:28.086101 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:28.086387 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:28.086399 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c8ba79fa-4920-44e2-950b-c7b6499595c0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:28.508511 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4f238a47-7afd-494d-80d2-1eed26cdb0cc","Type":"ContainerStarted","Data":"7686d84ae9f2cc137d7f67c21fd97c1140ece9bb6b763b63a1237301857f3a80"} Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:28.509748 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:28.511995 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" event={"ID":"c8ba79fa-4920-44e2-950b-c7b6499595c0","Type":"ContainerDied","Data":"ebea2f944dda7d11f392d40ea9f11998ac5d9fddc03ed90ab6f222af4c3c9b85"} Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:28.512081 4682 scope.go:117] "RemoveContainer" containerID="3ec62a3bda5f29dfe0062eeb3cbf5c24afc993e135ab86d1eaf7cdc60db7a136" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:28.512189 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c9cb78d75-d48lq" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:28.536067 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.7351245419999999 podStartE2EDuration="7.536048041s" podCreationTimestamp="2025-12-10 11:10:21 +0000 UTC" firstStartedPulling="2025-12-10 11:10:22.261717789 +0000 UTC m=+1502.581928539" lastFinishedPulling="2025-12-10 11:10:28.062641288 +0000 UTC m=+1508.382852038" observedRunningTime="2025-12-10 11:10:28.530558781 +0000 UTC m=+1508.850769541" watchObservedRunningTime="2025-12-10 11:10:28.536048041 +0000 UTC m=+1508.856258791" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:28.539782 4682 scope.go:117] "RemoveContainer" containerID="dc5de907d4cd927bc7c05a963a67f6bce685b43c5682e0b335bf6ca7005a72e2" Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:28.560043 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c9cb78d75-d48lq"] Dec 10 11:10:28 crc kubenswrapper[4682]: I1210 11:10:28.569453 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c9cb78d75-d48lq"] Dec 10 11:10:30 crc kubenswrapper[4682]: I1210 11:10:30.404675 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8ba79fa-4920-44e2-950b-c7b6499595c0" path="/var/lib/kubelet/pods/c8ba79fa-4920-44e2-950b-c7b6499595c0/volumes" Dec 10 11:10:31 crc kubenswrapper[4682]: I1210 11:10:31.557574 4682 generic.go:334] "Generic (PLEG): container finished" podID="34d52064-5f24-4cc9-ad72-c04f77d892bf" containerID="ff3e928170eb52aa6c63146bd59612247767f89d8057247a6b538918b7f6e802" exitCode=0 Dec 10 11:10:31 crc kubenswrapper[4682]: I1210 11:10:31.557638 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-bnl68" event={"ID":"34d52064-5f24-4cc9-ad72-c04f77d892bf","Type":"ContainerDied","Data":"ff3e928170eb52aa6c63146bd59612247767f89d8057247a6b538918b7f6e802"} Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:32.999817 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.091037 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrnw9\" (UniqueName: \"kubernetes.io/projected/34d52064-5f24-4cc9-ad72-c04f77d892bf-kube-api-access-mrnw9\") pod \"34d52064-5f24-4cc9-ad72-c04f77d892bf\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.091234 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-combined-ca-bundle\") pod \"34d52064-5f24-4cc9-ad72-c04f77d892bf\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.091300 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-config-data\") pod \"34d52064-5f24-4cc9-ad72-c04f77d892bf\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.091403 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-scripts\") pod \"34d52064-5f24-4cc9-ad72-c04f77d892bf\" (UID: \"34d52064-5f24-4cc9-ad72-c04f77d892bf\") " Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.099244 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34d52064-5f24-4cc9-ad72-c04f77d892bf-kube-api-access-mrnw9" (OuterVolumeSpecName: "kube-api-access-mrnw9") pod "34d52064-5f24-4cc9-ad72-c04f77d892bf" (UID: "34d52064-5f24-4cc9-ad72-c04f77d892bf"). InnerVolumeSpecName "kube-api-access-mrnw9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.099322 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-scripts" (OuterVolumeSpecName: "scripts") pod "34d52064-5f24-4cc9-ad72-c04f77d892bf" (UID: "34d52064-5f24-4cc9-ad72-c04f77d892bf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.139950 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-config-data" (OuterVolumeSpecName: "config-data") pod "34d52064-5f24-4cc9-ad72-c04f77d892bf" (UID: "34d52064-5f24-4cc9-ad72-c04f77d892bf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.143721 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "34d52064-5f24-4cc9-ad72-c04f77d892bf" (UID: "34d52064-5f24-4cc9-ad72-c04f77d892bf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.193447 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.193489 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.193499 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/34d52064-5f24-4cc9-ad72-c04f77d892bf-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.193509 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrnw9\" (UniqueName: \"kubernetes.io/projected/34d52064-5f24-4cc9-ad72-c04f77d892bf-kube-api-access-mrnw9\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.580163 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-bnl68" event={"ID":"34d52064-5f24-4cc9-ad72-c04f77d892bf","Type":"ContainerDied","Data":"e6d43bc3ea730ce17de0b6b5e33525c4e929dbb030b194718d1fbd23ec595375"} Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.580200 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e6d43bc3ea730ce17de0b6b5e33525c4e929dbb030b194718d1fbd23ec595375" Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.580277 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-bnl68" Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.763795 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.764240 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" containerName="nova-api-log" containerID="cri-o://c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68" gracePeriod=30 Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.764339 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" containerName="nova-api-api" containerID="cri-o://579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece" gracePeriod=30 Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.836029 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.836300 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2fce828a-4363-49ce-8faf-ca57ba6a67d3" containerName="nova-metadata-log" containerID="cri-o://13919ca103e0701c2c7b43f01142e4a8cf44288609964b6b509cab6a7c99518b" gracePeriod=30 Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.836611 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2fce828a-4363-49ce-8faf-ca57ba6a67d3" containerName="nova-metadata-metadata" containerID="cri-o://6289f735c282abb3bdd732cb8d4a9e16afdf1d99c51caf607724227a5ad6f0c0" gracePeriod=30 Dec 10 11:10:33 crc 
kubenswrapper[4682]: I1210 11:10:33.850207 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:10:33 crc kubenswrapper[4682]: I1210 11:10:33.850439 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="24421d2d-02fd-4c84-91ef-fbc5b8754a9f" containerName="nova-scheduler-scheduler" containerID="cri-o://2998187a8fe5f50e56e8c7333d72338ed1c51ff4c0b98d6c41f2bd11ffe43d90" gracePeriod=30 Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.448355 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.591323 4682 generic.go:334] "Generic (PLEG): container finished" podID="89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" containerID="579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece" exitCode=0 Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.591358 4682 generic.go:334] "Generic (PLEG): container finished" podID="89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" containerID="c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68" exitCode=143 Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.591404 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa","Type":"ContainerDied","Data":"579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece"} Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.591429 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa","Type":"ContainerDied","Data":"c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68"} Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.591438 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa","Type":"ContainerDied","Data":"c39f429ac4bae62a26a776dc325811560b3200204301407329c45e89c79a8654"} Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.591452 4682 scope.go:117] "RemoveContainer" containerID="579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.591597 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.596982 4682 generic.go:334] "Generic (PLEG): container finished" podID="2fce828a-4363-49ce-8faf-ca57ba6a67d3" containerID="13919ca103e0701c2c7b43f01142e4a8cf44288609964b6b509cab6a7c99518b" exitCode=143 Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.597242 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2fce828a-4363-49ce-8faf-ca57ba6a67d3","Type":"ContainerDied","Data":"13919ca103e0701c2c7b43f01142e4a8cf44288609964b6b509cab6a7c99518b"} Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.617313 4682 scope.go:117] "RemoveContainer" containerID="c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.636544 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-config-data\") pod \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.636610 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-logs\") pod \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.636635 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4dr8\" (UniqueName: \"kubernetes.io/projected/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-kube-api-access-g4dr8\") pod \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.636669 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-internal-tls-certs\") pod \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.636706 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-combined-ca-bundle\") pod \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.636742 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-public-tls-certs\") pod \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\" (UID: \"89bd5736-7cac-4956-bd2d-1fe4fccdbeaa\") " Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.638104 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-logs" (OuterVolumeSpecName: "logs") pod "89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" (UID: "89bd5736-7cac-4956-bd2d-1fe4fccdbeaa"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.639459 4682 scope.go:117] "RemoveContainer" containerID="579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece" Dec 10 11:10:34 crc kubenswrapper[4682]: E1210 11:10:34.643681 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece\": container with ID starting with 579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece not found: ID does not exist" containerID="579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.643728 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece"} err="failed to get container status \"579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece\": rpc error: code = NotFound desc = could not find container \"579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece\": container with ID starting with 579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece not found: ID does not exist" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.643756 4682 scope.go:117] "RemoveContainer" containerID="c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.643926 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-kube-api-access-g4dr8" (OuterVolumeSpecName: "kube-api-access-g4dr8") pod "89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" (UID: "89bd5736-7cac-4956-bd2d-1fe4fccdbeaa"). InnerVolumeSpecName "kube-api-access-g4dr8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:10:34 crc kubenswrapper[4682]: E1210 11:10:34.644210 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68\": container with ID starting with c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68 not found: ID does not exist" containerID="c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.644244 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68"} err="failed to get container status \"c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68\": rpc error: code = NotFound desc = could not find container \"c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68\": container with ID starting with c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68 not found: ID does not exist" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.644272 4682 scope.go:117] "RemoveContainer" containerID="579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.644739 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece"} err="failed to get container status \"579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece\": rpc error: code = NotFound desc = could not find container \"579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece\": container with ID starting with 579222ae43fe6de7db0fd131f415e995e11998dd23e31edf63718ffcdf92dece not found: ID does not exist" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.644785 4682 scope.go:117] "RemoveContainer" containerID="c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.645121 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68"} err="failed to get container status \"c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68\": rpc error: code = NotFound desc = could not find container \"c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68\": container with ID starting with c1b9fd9273ec4a2a2bed98d80127e30f5c0e6b255fb69b760793bb4632003b68 not found: ID does not exist" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.673349 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-config-data" (OuterVolumeSpecName: "config-data") pod "89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" (UID: "89bd5736-7cac-4956-bd2d-1fe4fccdbeaa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.686996 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" (UID: "89bd5736-7cac-4956-bd2d-1fe4fccdbeaa"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.699594 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" (UID: "89bd5736-7cac-4956-bd2d-1fe4fccdbeaa"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.726222 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" (UID: "89bd5736-7cac-4956-bd2d-1fe4fccdbeaa"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.739787 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.739823 4682 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.739832 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4dr8\" (UniqueName: \"kubernetes.io/projected/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-kube-api-access-g4dr8\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.739844 4682 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.739852 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.739860 4682 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.923424 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.933810 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.952420 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 10 11:10:34 crc kubenswrapper[4682]: E1210 11:10:34.952982 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" containerName="nova-api-api" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.953006 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" containerName="nova-api-api" Dec 10 11:10:34 crc kubenswrapper[4682]: E1210 11:10:34.953026 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8ba79fa-4920-44e2-950b-c7b6499595c0" 
containerName="init" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.953032 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8ba79fa-4920-44e2-950b-c7b6499595c0" containerName="init" Dec 10 11:10:34 crc kubenswrapper[4682]: E1210 11:10:34.953046 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8ba79fa-4920-44e2-950b-c7b6499595c0" containerName="dnsmasq-dns" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.953051 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8ba79fa-4920-44e2-950b-c7b6499595c0" containerName="dnsmasq-dns" Dec 10 11:10:34 crc kubenswrapper[4682]: E1210 11:10:34.953082 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" containerName="nova-api-log" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.953092 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" containerName="nova-api-log" Dec 10 11:10:34 crc kubenswrapper[4682]: E1210 11:10:34.953109 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34d52064-5f24-4cc9-ad72-c04f77d892bf" containerName="nova-manage" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.953114 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="34d52064-5f24-4cc9-ad72-c04f77d892bf" containerName="nova-manage" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.953343 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" containerName="nova-api-api" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.953367 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="34d52064-5f24-4cc9-ad72-c04f77d892bf" containerName="nova-manage" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.953383 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8ba79fa-4920-44e2-950b-c7b6499595c0" containerName="dnsmasq-dns" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.953399 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" containerName="nova-api-log" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.954770 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.956854 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.957114 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.957546 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 10 11:10:34 crc kubenswrapper[4682]: I1210 11:10:34.971414 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.044779 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6014f5d3-c141-4ace-b793-2fa5aaa2c856-public-tls-certs\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.044918 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6014f5d3-c141-4ace-b793-2fa5aaa2c856-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.044995 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6014f5d3-c141-4ace-b793-2fa5aaa2c856-logs\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.045065 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6m4vf\" (UniqueName: \"kubernetes.io/projected/6014f5d3-c141-4ace-b793-2fa5aaa2c856-kube-api-access-6m4vf\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.045116 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6014f5d3-c141-4ace-b793-2fa5aaa2c856-config-data\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.045204 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6014f5d3-c141-4ace-b793-2fa5aaa2c856-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.146870 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6014f5d3-c141-4ace-b793-2fa5aaa2c856-logs\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.146936 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6m4vf\" (UniqueName: \"kubernetes.io/projected/6014f5d3-c141-4ace-b793-2fa5aaa2c856-kube-api-access-6m4vf\") pod \"nova-api-0\" (UID: 
\"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.147003 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6014f5d3-c141-4ace-b793-2fa5aaa2c856-config-data\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.147083 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6014f5d3-c141-4ace-b793-2fa5aaa2c856-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.147121 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6014f5d3-c141-4ace-b793-2fa5aaa2c856-public-tls-certs\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.147679 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6014f5d3-c141-4ace-b793-2fa5aaa2c856-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.147695 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6014f5d3-c141-4ace-b793-2fa5aaa2c856-logs\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.152059 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6014f5d3-c141-4ace-b793-2fa5aaa2c856-public-tls-certs\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.152145 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6014f5d3-c141-4ace-b793-2fa5aaa2c856-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.152186 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6014f5d3-c141-4ace-b793-2fa5aaa2c856-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.152775 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6014f5d3-c141-4ace-b793-2fa5aaa2c856-config-data\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.168409 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6m4vf\" (UniqueName: \"kubernetes.io/projected/6014f5d3-c141-4ace-b793-2fa5aaa2c856-kube-api-access-6m4vf\") pod \"nova-api-0\" (UID: \"6014f5d3-c141-4ace-b793-2fa5aaa2c856\") " pod="openstack/nova-api-0" Dec 
10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.338838 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 11:10:35 crc kubenswrapper[4682]: I1210 11:10:35.806174 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 11:10:36 crc kubenswrapper[4682]: I1210 11:10:36.398222 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89bd5736-7cac-4956-bd2d-1fe4fccdbeaa" path="/var/lib/kubelet/pods/89bd5736-7cac-4956-bd2d-1fe4fccdbeaa/volumes" Dec 10 11:10:36 crc kubenswrapper[4682]: I1210 11:10:36.621802 4682 generic.go:334] "Generic (PLEG): container finished" podID="24421d2d-02fd-4c84-91ef-fbc5b8754a9f" containerID="2998187a8fe5f50e56e8c7333d72338ed1c51ff4c0b98d6c41f2bd11ffe43d90" exitCode=0 Dec 10 11:10:36 crc kubenswrapper[4682]: I1210 11:10:36.622063 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"24421d2d-02fd-4c84-91ef-fbc5b8754a9f","Type":"ContainerDied","Data":"2998187a8fe5f50e56e8c7333d72338ed1c51ff4c0b98d6c41f2bd11ffe43d90"} Dec 10 11:10:36 crc kubenswrapper[4682]: I1210 11:10:36.625292 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6014f5d3-c141-4ace-b793-2fa5aaa2c856","Type":"ContainerStarted","Data":"617bec700556f7b1cc1369ea5049027e7730c4fde74f86d7ef2f797394afb7f7"} Dec 10 11:10:36 crc kubenswrapper[4682]: I1210 11:10:36.625328 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6014f5d3-c141-4ace-b793-2fa5aaa2c856","Type":"ContainerStarted","Data":"33de5209a941122a8a26f8ffe5157617a05d54cafbaa5c25190be0b0ec0c7d52"} Dec 10 11:10:36 crc kubenswrapper[4682]: I1210 11:10:36.625338 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6014f5d3-c141-4ace-b793-2fa5aaa2c856","Type":"ContainerStarted","Data":"7dd511678e8a512620576e2bc05de43af64a652b99d7e90e597349c9bf8292d7"} Dec 10 11:10:36 crc kubenswrapper[4682]: I1210 11:10:36.645094 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.645066912 podStartE2EDuration="2.645066912s" podCreationTimestamp="2025-12-10 11:10:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:10:36.642799742 +0000 UTC m=+1516.963010512" watchObservedRunningTime="2025-12-10 11:10:36.645066912 +0000 UTC m=+1516.965277662" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.186457 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.298829 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-config-data\") pod \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\" (UID: \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\") " Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.299409 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-combined-ca-bundle\") pod \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\" (UID: \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\") " Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.299494 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2l7gk\" (UniqueName: \"kubernetes.io/projected/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-kube-api-access-2l7gk\") pod \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\" (UID: \"24421d2d-02fd-4c84-91ef-fbc5b8754a9f\") " Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.336782 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-kube-api-access-2l7gk" (OuterVolumeSpecName: "kube-api-access-2l7gk") pod "24421d2d-02fd-4c84-91ef-fbc5b8754a9f" (UID: "24421d2d-02fd-4c84-91ef-fbc5b8754a9f"). InnerVolumeSpecName "kube-api-access-2l7gk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.339460 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-config-data" (OuterVolumeSpecName: "config-data") pod "24421d2d-02fd-4c84-91ef-fbc5b8754a9f" (UID: "24421d2d-02fd-4c84-91ef-fbc5b8754a9f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.383896 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "24421d2d-02fd-4c84-91ef-fbc5b8754a9f" (UID: "24421d2d-02fd-4c84-91ef-fbc5b8754a9f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.403165 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.403197 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.403208 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2l7gk\" (UniqueName: \"kubernetes.io/projected/24421d2d-02fd-4c84-91ef-fbc5b8754a9f-kube-api-access-2l7gk\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.465714 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.614371 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2fce828a-4363-49ce-8faf-ca57ba6a67d3-logs\") pod \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.614566 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-config-data\") pod \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.614654 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66lh6\" (UniqueName: \"kubernetes.io/projected/2fce828a-4363-49ce-8faf-ca57ba6a67d3-kube-api-access-66lh6\") pod \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.614717 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-combined-ca-bundle\") pod \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.614830 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fce828a-4363-49ce-8faf-ca57ba6a67d3-logs" (OuterVolumeSpecName: "logs") pod "2fce828a-4363-49ce-8faf-ca57ba6a67d3" (UID: "2fce828a-4363-49ce-8faf-ca57ba6a67d3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.614879 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-nova-metadata-tls-certs\") pod \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\" (UID: \"2fce828a-4363-49ce-8faf-ca57ba6a67d3\") " Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.615329 4682 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2fce828a-4363-49ce-8faf-ca57ba6a67d3-logs\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.618642 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fce828a-4363-49ce-8faf-ca57ba6a67d3-kube-api-access-66lh6" (OuterVolumeSpecName: "kube-api-access-66lh6") pod "2fce828a-4363-49ce-8faf-ca57ba6a67d3" (UID: "2fce828a-4363-49ce-8faf-ca57ba6a67d3"). InnerVolumeSpecName "kube-api-access-66lh6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.645361 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-config-data" (OuterVolumeSpecName: "config-data") pod "2fce828a-4363-49ce-8faf-ca57ba6a67d3" (UID: "2fce828a-4363-49ce-8faf-ca57ba6a67d3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.647284 4682 generic.go:334] "Generic (PLEG): container finished" podID="2fce828a-4363-49ce-8faf-ca57ba6a67d3" containerID="6289f735c282abb3bdd732cb8d4a9e16afdf1d99c51caf607724227a5ad6f0c0" exitCode=0 Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.647333 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2fce828a-4363-49ce-8faf-ca57ba6a67d3","Type":"ContainerDied","Data":"6289f735c282abb3bdd732cb8d4a9e16afdf1d99c51caf607724227a5ad6f0c0"} Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.647390 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.647406 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2fce828a-4363-49ce-8faf-ca57ba6a67d3","Type":"ContainerDied","Data":"c38c22d39000ff89dc0b5ccea1f0ed5f44295091be36fe31fec4a8bd28cbe426"} Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.647431 4682 scope.go:117] "RemoveContainer" containerID="6289f735c282abb3bdd732cb8d4a9e16afdf1d99c51caf607724227a5ad6f0c0" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.652970 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.653890 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"24421d2d-02fd-4c84-91ef-fbc5b8754a9f","Type":"ContainerDied","Data":"6ddae70184a65aa8d7a894de7542e77c70007c45b54d8c8bdfd0d53e0cfb1f8a"} Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.658671 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2fce828a-4363-49ce-8faf-ca57ba6a67d3" (UID: "2fce828a-4363-49ce-8faf-ca57ba6a67d3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.690351 4682 scope.go:117] "RemoveContainer" containerID="13919ca103e0701c2c7b43f01142e4a8cf44288609964b6b509cab6a7c99518b" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.711688 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "2fce828a-4363-49ce-8faf-ca57ba6a67d3" (UID: "2fce828a-4363-49ce-8faf-ca57ba6a67d3"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.716601 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.717413 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.717442 4682 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.717455 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fce828a-4363-49ce-8faf-ca57ba6a67d3-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.717482 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66lh6\" (UniqueName: \"kubernetes.io/projected/2fce828a-4363-49ce-8faf-ca57ba6a67d3-kube-api-access-66lh6\") on node \"crc\" DevicePath \"\"" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.746760 4682 scope.go:117] "RemoveContainer" containerID="6289f735c282abb3bdd732cb8d4a9e16afdf1d99c51caf607724227a5ad6f0c0" Dec 10 11:10:37 crc kubenswrapper[4682]: E1210 11:10:37.749351 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6289f735c282abb3bdd732cb8d4a9e16afdf1d99c51caf607724227a5ad6f0c0\": container with ID starting with 6289f735c282abb3bdd732cb8d4a9e16afdf1d99c51caf607724227a5ad6f0c0 not found: ID does not exist" containerID="6289f735c282abb3bdd732cb8d4a9e16afdf1d99c51caf607724227a5ad6f0c0" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.749393 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6289f735c282abb3bdd732cb8d4a9e16afdf1d99c51caf607724227a5ad6f0c0"} err="failed to get container status \"6289f735c282abb3bdd732cb8d4a9e16afdf1d99c51caf607724227a5ad6f0c0\": rpc error: code = NotFound desc = could not find container \"6289f735c282abb3bdd732cb8d4a9e16afdf1d99c51caf607724227a5ad6f0c0\": container with ID starting with 6289f735c282abb3bdd732cb8d4a9e16afdf1d99c51caf607724227a5ad6f0c0 not found: ID does not exist" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.749422 4682 scope.go:117] "RemoveContainer" containerID="13919ca103e0701c2c7b43f01142e4a8cf44288609964b6b509cab6a7c99518b" Dec 10 11:10:37 crc kubenswrapper[4682]: E1210 11:10:37.751285 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13919ca103e0701c2c7b43f01142e4a8cf44288609964b6b509cab6a7c99518b\": container with ID starting with 13919ca103e0701c2c7b43f01142e4a8cf44288609964b6b509cab6a7c99518b not found: ID does not exist" containerID="13919ca103e0701c2c7b43f01142e4a8cf44288609964b6b509cab6a7c99518b" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.751319 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13919ca103e0701c2c7b43f01142e4a8cf44288609964b6b509cab6a7c99518b"} err="failed to get container status \"13919ca103e0701c2c7b43f01142e4a8cf44288609964b6b509cab6a7c99518b\": rpc error: code = NotFound desc = 
could not find container \"13919ca103e0701c2c7b43f01142e4a8cf44288609964b6b509cab6a7c99518b\": container with ID starting with 13919ca103e0701c2c7b43f01142e4a8cf44288609964b6b509cab6a7c99518b not found: ID does not exist" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.751345 4682 scope.go:117] "RemoveContainer" containerID="2998187a8fe5f50e56e8c7333d72338ed1c51ff4c0b98d6c41f2bd11ffe43d90" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.753450 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.771335 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:10:37 crc kubenswrapper[4682]: E1210 11:10:37.771883 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24421d2d-02fd-4c84-91ef-fbc5b8754a9f" containerName="nova-scheduler-scheduler" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.771910 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="24421d2d-02fd-4c84-91ef-fbc5b8754a9f" containerName="nova-scheduler-scheduler" Dec 10 11:10:37 crc kubenswrapper[4682]: E1210 11:10:37.771936 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fce828a-4363-49ce-8faf-ca57ba6a67d3" containerName="nova-metadata-log" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.771944 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fce828a-4363-49ce-8faf-ca57ba6a67d3" containerName="nova-metadata-log" Dec 10 11:10:37 crc kubenswrapper[4682]: E1210 11:10:37.771973 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fce828a-4363-49ce-8faf-ca57ba6a67d3" containerName="nova-metadata-metadata" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.771984 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fce828a-4363-49ce-8faf-ca57ba6a67d3" containerName="nova-metadata-metadata" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.772234 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fce828a-4363-49ce-8faf-ca57ba6a67d3" containerName="nova-metadata-metadata" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.776838 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="24421d2d-02fd-4c84-91ef-fbc5b8754a9f" containerName="nova-scheduler-scheduler" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.776865 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fce828a-4363-49ce-8faf-ca57ba6a67d3" containerName="nova-metadata-log" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.778037 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.783291 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.785760 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.922950 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daab815f-ce3f-44be-8fbf-5a75b4379ccf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"daab815f-ce3f-44be-8fbf-5a75b4379ccf\") " pod="openstack/nova-scheduler-0" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.923093 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daab815f-ce3f-44be-8fbf-5a75b4379ccf-config-data\") pod \"nova-scheduler-0\" (UID: \"daab815f-ce3f-44be-8fbf-5a75b4379ccf\") " pod="openstack/nova-scheduler-0" Dec 10 11:10:37 crc kubenswrapper[4682]: I1210 11:10:37.923524 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjs2z\" (UniqueName: \"kubernetes.io/projected/daab815f-ce3f-44be-8fbf-5a75b4379ccf-kube-api-access-sjs2z\") pod \"nova-scheduler-0\" (UID: \"daab815f-ce3f-44be-8fbf-5a75b4379ccf\") " pod="openstack/nova-scheduler-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.025001 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daab815f-ce3f-44be-8fbf-5a75b4379ccf-config-data\") pod \"nova-scheduler-0\" (UID: \"daab815f-ce3f-44be-8fbf-5a75b4379ccf\") " pod="openstack/nova-scheduler-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.025157 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjs2z\" (UniqueName: \"kubernetes.io/projected/daab815f-ce3f-44be-8fbf-5a75b4379ccf-kube-api-access-sjs2z\") pod \"nova-scheduler-0\" (UID: \"daab815f-ce3f-44be-8fbf-5a75b4379ccf\") " pod="openstack/nova-scheduler-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.025196 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daab815f-ce3f-44be-8fbf-5a75b4379ccf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"daab815f-ce3f-44be-8fbf-5a75b4379ccf\") " pod="openstack/nova-scheduler-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.028627 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daab815f-ce3f-44be-8fbf-5a75b4379ccf-config-data\") pod \"nova-scheduler-0\" (UID: \"daab815f-ce3f-44be-8fbf-5a75b4379ccf\") " pod="openstack/nova-scheduler-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.029608 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daab815f-ce3f-44be-8fbf-5a75b4379ccf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"daab815f-ce3f-44be-8fbf-5a75b4379ccf\") " pod="openstack/nova-scheduler-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.042814 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjs2z\" (UniqueName: 
\"kubernetes.io/projected/daab815f-ce3f-44be-8fbf-5a75b4379ccf-kube-api-access-sjs2z\") pod \"nova-scheduler-0\" (UID: \"daab815f-ce3f-44be-8fbf-5a75b4379ccf\") " pod="openstack/nova-scheduler-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.052034 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.063844 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.075956 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.080278 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.084584 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.084995 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.088022 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.101221 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.228909 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.229244 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.229302 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-logs\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.229321 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-config-data\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.229421 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxk6k\" (UniqueName: \"kubernetes.io/projected/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-kube-api-access-fxk6k\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.331960 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-config-data\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.332028 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxk6k\" (UniqueName: \"kubernetes.io/projected/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-kube-api-access-fxk6k\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.332232 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.332306 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.332390 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-logs\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.334593 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-logs\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.337487 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-config-data\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.338709 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.343501 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.347860 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxk6k\" (UniqueName: \"kubernetes.io/projected/bbfbd576-92c3-44d4-bdcf-8e17e0c65946-kube-api-access-fxk6k\") pod \"nova-metadata-0\" (UID: \"bbfbd576-92c3-44d4-bdcf-8e17e0c65946\") " pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.392967 4682 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="24421d2d-02fd-4c84-91ef-fbc5b8754a9f" path="/var/lib/kubelet/pods/24421d2d-02fd-4c84-91ef-fbc5b8754a9f/volumes" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.395867 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fce828a-4363-49ce-8faf-ca57ba6a67d3" path="/var/lib/kubelet/pods/2fce828a-4363-49ce-8faf-ca57ba6a67d3/volumes" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.595603 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 11:10:38 crc kubenswrapper[4682]: W1210 11:10:38.600460 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddaab815f_ce3f_44be_8fbf_5a75b4379ccf.slice/crio-453e8477b1f884745e6737668c954123a974d12b16e31e0d6b36d718b8406408 WatchSource:0}: Error finding container 453e8477b1f884745e6737668c954123a974d12b16e31e0d6b36d718b8406408: Status 404 returned error can't find the container with id 453e8477b1f884745e6737668c954123a974d12b16e31e0d6b36d718b8406408 Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.602178 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 11:10:38 crc kubenswrapper[4682]: I1210 11:10:38.663820 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"daab815f-ce3f-44be-8fbf-5a75b4379ccf","Type":"ContainerStarted","Data":"453e8477b1f884745e6737668c954123a974d12b16e31e0d6b36d718b8406408"} Dec 10 11:10:39 crc kubenswrapper[4682]: I1210 11:10:39.049541 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 11:10:39 crc kubenswrapper[4682]: W1210 11:10:39.057047 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbbfbd576_92c3_44d4_bdcf_8e17e0c65946.slice/crio-29643763c43ac634dc337d3c6f1596148804595eda7bba0c6d87af2db2ec6c4e WatchSource:0}: Error finding container 29643763c43ac634dc337d3c6f1596148804595eda7bba0c6d87af2db2ec6c4e: Status 404 returned error can't find the container with id 29643763c43ac634dc337d3c6f1596148804595eda7bba0c6d87af2db2ec6c4e Dec 10 11:10:39 crc kubenswrapper[4682]: I1210 11:10:39.690728 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"daab815f-ce3f-44be-8fbf-5a75b4379ccf","Type":"ContainerStarted","Data":"f941cbd7233137a3e221a8999f243ceb5e9821488782fe83ade47f55dd14822d"} Dec 10 11:10:39 crc kubenswrapper[4682]: I1210 11:10:39.695839 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bbfbd576-92c3-44d4-bdcf-8e17e0c65946","Type":"ContainerStarted","Data":"a64dd7ffd0daa8bb7198ecae07fea4c7f9ad93f05f7028549c9e273f0715c3d7"} Dec 10 11:10:39 crc kubenswrapper[4682]: I1210 11:10:39.695898 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bbfbd576-92c3-44d4-bdcf-8e17e0c65946","Type":"ContainerStarted","Data":"f27d5bd119eeee06a8c02862b721322fa954ee6366929120c9d16cfc466bd929"} Dec 10 11:10:39 crc kubenswrapper[4682]: I1210 11:10:39.695912 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bbfbd576-92c3-44d4-bdcf-8e17e0c65946","Type":"ContainerStarted","Data":"29643763c43ac634dc337d3c6f1596148804595eda7bba0c6d87af2db2ec6c4e"} Dec 10 11:10:39 crc kubenswrapper[4682]: I1210 11:10:39.717343 4682 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.717325882 podStartE2EDuration="2.717325882s" podCreationTimestamp="2025-12-10 11:10:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:10:39.71209248 +0000 UTC m=+1520.032303230" watchObservedRunningTime="2025-12-10 11:10:39.717325882 +0000 UTC m=+1520.037536632" Dec 10 11:10:39 crc kubenswrapper[4682]: I1210 11:10:39.740978 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.740955258 podStartE2EDuration="1.740955258s" podCreationTimestamp="2025-12-10 11:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:10:39.728360001 +0000 UTC m=+1520.048570761" watchObservedRunningTime="2025-12-10 11:10:39.740955258 +0000 UTC m=+1520.061166018" Dec 10 11:10:43 crc kubenswrapper[4682]: I1210 11:10:43.102491 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 10 11:10:43 crc kubenswrapper[4682]: I1210 11:10:43.602559 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 10 11:10:43 crc kubenswrapper[4682]: I1210 11:10:43.602883 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 10 11:10:45 crc kubenswrapper[4682]: I1210 11:10:45.468294 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 11:10:45 crc kubenswrapper[4682]: I1210 11:10:45.468592 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 11:10:46 crc kubenswrapper[4682]: I1210 11:10:46.380632 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="6014f5d3-c141-4ace-b793-2fa5aaa2c856" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.223:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 11:10:46 crc kubenswrapper[4682]: I1210 11:10:46.475758 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="6014f5d3-c141-4ace-b793-2fa5aaa2c856" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.223:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 11:10:48 crc kubenswrapper[4682]: I1210 11:10:48.101927 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 10 11:10:48 crc kubenswrapper[4682]: I1210 11:10:48.142764 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 10 11:10:48 crc kubenswrapper[4682]: I1210 11:10:48.602836 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 10 11:10:48 crc kubenswrapper[4682]: I1210 11:10:48.603193 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 10 11:10:48 crc kubenswrapper[4682]: I1210 11:10:48.826098 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 10 11:10:49 crc kubenswrapper[4682]: I1210 11:10:49.618744 4682 prober.go:107] "Probe failed" 
probeType="Startup" pod="openstack/nova-metadata-0" podUID="bbfbd576-92c3-44d4-bdcf-8e17e0c65946" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.225:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 11:10:49 crc kubenswrapper[4682]: I1210 11:10:49.618770 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="bbfbd576-92c3-44d4-bdcf-8e17e0c65946" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.225:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 11:10:50 crc kubenswrapper[4682]: I1210 11:10:50.618973 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="60fa05d8-74a9-4960-bbb8-ceed10ea183c" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.213:3000/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 10 11:10:50 crc kubenswrapper[4682]: I1210 11:10:50.793873 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qxpq4"] Dec 10 11:10:50 crc kubenswrapper[4682]: I1210 11:10:50.797538 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:10:50 crc kubenswrapper[4682]: I1210 11:10:50.810802 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qxpq4"] Dec 10 11:10:50 crc kubenswrapper[4682]: I1210 11:10:50.853636 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57fa67d-be25-4197-ae3c-de720c1c9282-catalog-content\") pod \"redhat-marketplace-qxpq4\" (UID: \"c57fa67d-be25-4197-ae3c-de720c1c9282\") " pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:10:50 crc kubenswrapper[4682]: I1210 11:10:50.853691 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57fa67d-be25-4197-ae3c-de720c1c9282-utilities\") pod \"redhat-marketplace-qxpq4\" (UID: \"c57fa67d-be25-4197-ae3c-de720c1c9282\") " pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:10:50 crc kubenswrapper[4682]: I1210 11:10:50.853757 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mj2hh\" (UniqueName: \"kubernetes.io/projected/c57fa67d-be25-4197-ae3c-de720c1c9282-kube-api-access-mj2hh\") pod \"redhat-marketplace-qxpq4\" (UID: \"c57fa67d-be25-4197-ae3c-de720c1c9282\") " pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:10:50 crc kubenswrapper[4682]: I1210 11:10:50.955701 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57fa67d-be25-4197-ae3c-de720c1c9282-catalog-content\") pod \"redhat-marketplace-qxpq4\" (UID: \"c57fa67d-be25-4197-ae3c-de720c1c9282\") " pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:10:50 crc kubenswrapper[4682]: I1210 11:10:50.955745 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57fa67d-be25-4197-ae3c-de720c1c9282-utilities\") pod \"redhat-marketplace-qxpq4\" (UID: \"c57fa67d-be25-4197-ae3c-de720c1c9282\") " 
pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:10:50 crc kubenswrapper[4682]: I1210 11:10:50.955823 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mj2hh\" (UniqueName: \"kubernetes.io/projected/c57fa67d-be25-4197-ae3c-de720c1c9282-kube-api-access-mj2hh\") pod \"redhat-marketplace-qxpq4\" (UID: \"c57fa67d-be25-4197-ae3c-de720c1c9282\") " pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:10:50 crc kubenswrapper[4682]: I1210 11:10:50.956299 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57fa67d-be25-4197-ae3c-de720c1c9282-catalog-content\") pod \"redhat-marketplace-qxpq4\" (UID: \"c57fa67d-be25-4197-ae3c-de720c1c9282\") " pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:10:50 crc kubenswrapper[4682]: I1210 11:10:50.956359 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57fa67d-be25-4197-ae3c-de720c1c9282-utilities\") pod \"redhat-marketplace-qxpq4\" (UID: \"c57fa67d-be25-4197-ae3c-de720c1c9282\") " pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:10:50 crc kubenswrapper[4682]: I1210 11:10:50.988235 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mj2hh\" (UniqueName: \"kubernetes.io/projected/c57fa67d-be25-4197-ae3c-de720c1c9282-kube-api-access-mj2hh\") pod \"redhat-marketplace-qxpq4\" (UID: \"c57fa67d-be25-4197-ae3c-de720c1c9282\") " pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:10:51 crc kubenswrapper[4682]: I1210 11:10:51.224570 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:10:51 crc kubenswrapper[4682]: I1210 11:10:51.749834 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qxpq4"] Dec 10 11:10:51 crc kubenswrapper[4682]: I1210 11:10:51.819622 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qxpq4" event={"ID":"c57fa67d-be25-4197-ae3c-de720c1c9282","Type":"ContainerStarted","Data":"480cfcd95818e384cf0c020c827cd6560e9258901e7314e6eb02ebf64c19c30f"} Dec 10 11:10:51 crc kubenswrapper[4682]: I1210 11:10:51.826231 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 10 11:10:52 crc kubenswrapper[4682]: I1210 11:10:52.832958 4682 generic.go:334] "Generic (PLEG): container finished" podID="c57fa67d-be25-4197-ae3c-de720c1c9282" containerID="444b9af519990d6585fae3442b83040f1b2d594e0f4ad1ab9b6c7fb0acb7e9e2" exitCode=0 Dec 10 11:10:52 crc kubenswrapper[4682]: I1210 11:10:52.833130 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qxpq4" event={"ID":"c57fa67d-be25-4197-ae3c-de720c1c9282","Type":"ContainerDied","Data":"444b9af519990d6585fae3442b83040f1b2d594e0f4ad1ab9b6c7fb0acb7e9e2"} Dec 10 11:10:53 crc kubenswrapper[4682]: I1210 11:10:53.844018 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qxpq4" event={"ID":"c57fa67d-be25-4197-ae3c-de720c1c9282","Type":"ContainerStarted","Data":"e87531fce47598f6bbae20829eef3de4782fe2c6061927e974faf25d37ea5f17"} Dec 10 11:10:54 crc kubenswrapper[4682]: I1210 11:10:54.855715 4682 generic.go:334] "Generic (PLEG): container finished" podID="c57fa67d-be25-4197-ae3c-de720c1c9282" 
containerID="e87531fce47598f6bbae20829eef3de4782fe2c6061927e974faf25d37ea5f17" exitCode=0 Dec 10 11:10:54 crc kubenswrapper[4682]: I1210 11:10:54.855822 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qxpq4" event={"ID":"c57fa67d-be25-4197-ae3c-de720c1c9282","Type":"ContainerDied","Data":"e87531fce47598f6bbae20829eef3de4782fe2c6061927e974faf25d37ea5f17"} Dec 10 11:10:55 crc kubenswrapper[4682]: I1210 11:10:55.349859 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 10 11:10:55 crc kubenswrapper[4682]: I1210 11:10:55.350713 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 10 11:10:55 crc kubenswrapper[4682]: I1210 11:10:55.358707 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 10 11:10:55 crc kubenswrapper[4682]: I1210 11:10:55.365326 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 10 11:10:55 crc kubenswrapper[4682]: I1210 11:10:55.868763 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qxpq4" event={"ID":"c57fa67d-be25-4197-ae3c-de720c1c9282","Type":"ContainerStarted","Data":"0049ae084802c061cb1814fe287793b391b73206fcc09c16578f05e289ec9e24"} Dec 10 11:10:55 crc kubenswrapper[4682]: I1210 11:10:55.869170 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 10 11:10:55 crc kubenswrapper[4682]: I1210 11:10:55.876851 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 10 11:10:55 crc kubenswrapper[4682]: I1210 11:10:55.888796 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qxpq4" podStartSLOduration=3.448648738 podStartE2EDuration="5.888781001s" podCreationTimestamp="2025-12-10 11:10:50 +0000 UTC" firstStartedPulling="2025-12-10 11:10:52.835354201 +0000 UTC m=+1533.155564951" lastFinishedPulling="2025-12-10 11:10:55.275486454 +0000 UTC m=+1535.595697214" observedRunningTime="2025-12-10 11:10:55.886618644 +0000 UTC m=+1536.206829394" watchObservedRunningTime="2025-12-10 11:10:55.888781001 +0000 UTC m=+1536.208991741" Dec 10 11:10:58 crc kubenswrapper[4682]: I1210 11:10:58.656141 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 10 11:10:58 crc kubenswrapper[4682]: I1210 11:10:58.657633 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 10 11:10:58 crc kubenswrapper[4682]: I1210 11:10:58.661625 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 10 11:10:58 crc kubenswrapper[4682]: I1210 11:10:58.903543 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 10 11:11:01 crc kubenswrapper[4682]: I1210 11:11:01.225543 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:11:01 crc kubenswrapper[4682]: I1210 11:11:01.225861 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:11:01 crc kubenswrapper[4682]: I1210 11:11:01.548378 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:11:01 crc kubenswrapper[4682]: I1210 11:11:01.973502 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:11:02 crc kubenswrapper[4682]: I1210 11:11:02.021533 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qxpq4"] Dec 10 11:11:03 crc kubenswrapper[4682]: I1210 11:11:03.947006 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qxpq4" podUID="c57fa67d-be25-4197-ae3c-de720c1c9282" containerName="registry-server" containerID="cri-o://0049ae084802c061cb1814fe287793b391b73206fcc09c16578f05e289ec9e24" gracePeriod=2 Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.552865 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.657722 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57fa67d-be25-4197-ae3c-de720c1c9282-catalog-content\") pod \"c57fa67d-be25-4197-ae3c-de720c1c9282\" (UID: \"c57fa67d-be25-4197-ae3c-de720c1c9282\") " Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.657811 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57fa67d-be25-4197-ae3c-de720c1c9282-utilities\") pod \"c57fa67d-be25-4197-ae3c-de720c1c9282\" (UID: \"c57fa67d-be25-4197-ae3c-de720c1c9282\") " Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.657955 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mj2hh\" (UniqueName: \"kubernetes.io/projected/c57fa67d-be25-4197-ae3c-de720c1c9282-kube-api-access-mj2hh\") pod \"c57fa67d-be25-4197-ae3c-de720c1c9282\" (UID: \"c57fa67d-be25-4197-ae3c-de720c1c9282\") " Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.658786 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c57fa67d-be25-4197-ae3c-de720c1c9282-utilities" (OuterVolumeSpecName: "utilities") pod "c57fa67d-be25-4197-ae3c-de720c1c9282" (UID: "c57fa67d-be25-4197-ae3c-de720c1c9282"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.665400 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c57fa67d-be25-4197-ae3c-de720c1c9282-kube-api-access-mj2hh" (OuterVolumeSpecName: "kube-api-access-mj2hh") pod "c57fa67d-be25-4197-ae3c-de720c1c9282" (UID: "c57fa67d-be25-4197-ae3c-de720c1c9282"). InnerVolumeSpecName "kube-api-access-mj2hh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.681074 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c57fa67d-be25-4197-ae3c-de720c1c9282-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c57fa67d-be25-4197-ae3c-de720c1c9282" (UID: "c57fa67d-be25-4197-ae3c-de720c1c9282"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.760777 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c57fa67d-be25-4197-ae3c-de720c1c9282-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.760837 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c57fa67d-be25-4197-ae3c-de720c1c9282-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.760849 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mj2hh\" (UniqueName: \"kubernetes.io/projected/c57fa67d-be25-4197-ae3c-de720c1c9282-kube-api-access-mj2hh\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.959539 4682 generic.go:334] "Generic (PLEG): container finished" podID="c57fa67d-be25-4197-ae3c-de720c1c9282" containerID="0049ae084802c061cb1814fe287793b391b73206fcc09c16578f05e289ec9e24" exitCode=0 Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.959589 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qxpq4" event={"ID":"c57fa67d-be25-4197-ae3c-de720c1c9282","Type":"ContainerDied","Data":"0049ae084802c061cb1814fe287793b391b73206fcc09c16578f05e289ec9e24"} Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.959634 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qxpq4" event={"ID":"c57fa67d-be25-4197-ae3c-de720c1c9282","Type":"ContainerDied","Data":"480cfcd95818e384cf0c020c827cd6560e9258901e7314e6eb02ebf64c19c30f"} Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.959669 4682 scope.go:117] "RemoveContainer" containerID="0049ae084802c061cb1814fe287793b391b73206fcc09c16578f05e289ec9e24" Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.959805 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qxpq4" Dec 10 11:11:04 crc kubenswrapper[4682]: I1210 11:11:04.999954 4682 scope.go:117] "RemoveContainer" containerID="e87531fce47598f6bbae20829eef3de4782fe2c6061927e974faf25d37ea5f17" Dec 10 11:11:05 crc kubenswrapper[4682]: I1210 11:11:05.004449 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qxpq4"] Dec 10 11:11:05 crc kubenswrapper[4682]: I1210 11:11:05.014974 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qxpq4"] Dec 10 11:11:05 crc kubenswrapper[4682]: I1210 11:11:05.025440 4682 scope.go:117] "RemoveContainer" containerID="444b9af519990d6585fae3442b83040f1b2d594e0f4ad1ab9b6c7fb0acb7e9e2" Dec 10 11:11:05 crc kubenswrapper[4682]: I1210 11:11:05.075973 4682 scope.go:117] "RemoveContainer" containerID="0049ae084802c061cb1814fe287793b391b73206fcc09c16578f05e289ec9e24" Dec 10 11:11:05 crc kubenswrapper[4682]: E1210 11:11:05.076680 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0049ae084802c061cb1814fe287793b391b73206fcc09c16578f05e289ec9e24\": container with ID starting with 0049ae084802c061cb1814fe287793b391b73206fcc09c16578f05e289ec9e24 not found: ID does not exist" containerID="0049ae084802c061cb1814fe287793b391b73206fcc09c16578f05e289ec9e24" Dec 10 11:11:05 crc kubenswrapper[4682]: I1210 11:11:05.076823 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0049ae084802c061cb1814fe287793b391b73206fcc09c16578f05e289ec9e24"} err="failed to get container status \"0049ae084802c061cb1814fe287793b391b73206fcc09c16578f05e289ec9e24\": rpc error: code = NotFound desc = could not find container \"0049ae084802c061cb1814fe287793b391b73206fcc09c16578f05e289ec9e24\": container with ID starting with 0049ae084802c061cb1814fe287793b391b73206fcc09c16578f05e289ec9e24 not found: ID does not exist" Dec 10 11:11:05 crc kubenswrapper[4682]: I1210 11:11:05.076915 4682 scope.go:117] "RemoveContainer" containerID="e87531fce47598f6bbae20829eef3de4782fe2c6061927e974faf25d37ea5f17" Dec 10 11:11:05 crc kubenswrapper[4682]: E1210 11:11:05.077673 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e87531fce47598f6bbae20829eef3de4782fe2c6061927e974faf25d37ea5f17\": container with ID starting with e87531fce47598f6bbae20829eef3de4782fe2c6061927e974faf25d37ea5f17 not found: ID does not exist" containerID="e87531fce47598f6bbae20829eef3de4782fe2c6061927e974faf25d37ea5f17" Dec 10 11:11:05 crc kubenswrapper[4682]: I1210 11:11:05.078053 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e87531fce47598f6bbae20829eef3de4782fe2c6061927e974faf25d37ea5f17"} err="failed to get container status \"e87531fce47598f6bbae20829eef3de4782fe2c6061927e974faf25d37ea5f17\": rpc error: code = NotFound desc = could not find container \"e87531fce47598f6bbae20829eef3de4782fe2c6061927e974faf25d37ea5f17\": container with ID starting with e87531fce47598f6bbae20829eef3de4782fe2c6061927e974faf25d37ea5f17 not found: ID does not exist" Dec 10 11:11:05 crc kubenswrapper[4682]: I1210 11:11:05.078088 4682 scope.go:117] "RemoveContainer" containerID="444b9af519990d6585fae3442b83040f1b2d594e0f4ad1ab9b6c7fb0acb7e9e2" Dec 10 11:11:05 crc kubenswrapper[4682]: E1210 11:11:05.078573 4682 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"444b9af519990d6585fae3442b83040f1b2d594e0f4ad1ab9b6c7fb0acb7e9e2\": container with ID starting with 444b9af519990d6585fae3442b83040f1b2d594e0f4ad1ab9b6c7fb0acb7e9e2 not found: ID does not exist" containerID="444b9af519990d6585fae3442b83040f1b2d594e0f4ad1ab9b6c7fb0acb7e9e2" Dec 10 11:11:05 crc kubenswrapper[4682]: I1210 11:11:05.078599 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"444b9af519990d6585fae3442b83040f1b2d594e0f4ad1ab9b6c7fb0acb7e9e2"} err="failed to get container status \"444b9af519990d6585fae3442b83040f1b2d594e0f4ad1ab9b6c7fb0acb7e9e2\": rpc error: code = NotFound desc = could not find container \"444b9af519990d6585fae3442b83040f1b2d594e0f4ad1ab9b6c7fb0acb7e9e2\": container with ID starting with 444b9af519990d6585fae3442b83040f1b2d594e0f4ad1ab9b6c7fb0acb7e9e2 not found: ID does not exist" Dec 10 11:11:06 crc kubenswrapper[4682]: I1210 11:11:06.394205 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c57fa67d-be25-4197-ae3c-de720c1c9282" path="/var/lib/kubelet/pods/c57fa67d-be25-4197-ae3c-de720c1c9282/volumes" Dec 10 11:11:06 crc kubenswrapper[4682]: I1210 11:11:06.478844 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:11:06 crc kubenswrapper[4682]: I1210 11:11:06.479246 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.358128 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-db-sync-9q89f"] Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.374095 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cloudkitty-db-sync-9q89f"] Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.458782 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cloudkitty-db-sync-cdf59"] Dec 10 11:11:09 crc kubenswrapper[4682]: E1210 11:11:09.459181 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57fa67d-be25-4197-ae3c-de720c1c9282" containerName="extract-utilities" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.459196 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57fa67d-be25-4197-ae3c-de720c1c9282" containerName="extract-utilities" Dec 10 11:11:09 crc kubenswrapper[4682]: E1210 11:11:09.459223 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57fa67d-be25-4197-ae3c-de720c1c9282" containerName="extract-content" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.459229 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c57fa67d-be25-4197-ae3c-de720c1c9282" containerName="extract-content" Dec 10 11:11:09 crc kubenswrapper[4682]: E1210 11:11:09.459254 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c57fa67d-be25-4197-ae3c-de720c1c9282" containerName="registry-server" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.459260 4682 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="c57fa67d-be25-4197-ae3c-de720c1c9282" containerName="registry-server" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.459440 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c57fa67d-be25-4197-ae3c-de720c1c9282" containerName="registry-server" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.460253 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.462594 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.481300 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-db-sync-cdf59"] Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.570853 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/105b676e-6612-406e-984b-86afbf8ede6c-config-data\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.570915 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/105b676e-6612-406e-984b-86afbf8ede6c-scripts\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.571185 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9j78w\" (UniqueName: \"kubernetes.io/projected/105b676e-6612-406e-984b-86afbf8ede6c-kube-api-access-9j78w\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.571314 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/105b676e-6612-406e-984b-86afbf8ede6c-certs\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.571476 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/105b676e-6612-406e-984b-86afbf8ede6c-combined-ca-bundle\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.673987 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9j78w\" (UniqueName: \"kubernetes.io/projected/105b676e-6612-406e-984b-86afbf8ede6c-kube-api-access-9j78w\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.674086 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/projected/105b676e-6612-406e-984b-86afbf8ede6c-certs\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: 
I1210 11:11:09.674152 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/105b676e-6612-406e-984b-86afbf8ede6c-combined-ca-bundle\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.674248 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/105b676e-6612-406e-984b-86afbf8ede6c-config-data\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.674271 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/105b676e-6612-406e-984b-86afbf8ede6c-scripts\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.681765 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/105b676e-6612-406e-984b-86afbf8ede6c-config-data\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.683971 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/projected/105b676e-6612-406e-984b-86afbf8ede6c-certs\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.695024 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/105b676e-6612-406e-984b-86afbf8ede6c-scripts\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.699496 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/105b676e-6612-406e-984b-86afbf8ede6c-combined-ca-bundle\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.708143 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9j78w\" (UniqueName: \"kubernetes.io/projected/105b676e-6612-406e-984b-86afbf8ede6c-kube-api-access-9j78w\") pod \"cloudkitty-db-sync-cdf59\" (UID: \"105b676e-6612-406e-984b-86afbf8ede6c\") " pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:09 crc kubenswrapper[4682]: I1210 11:11:09.789936 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cloudkitty-db-sync-cdf59" Dec 10 11:11:10 crc kubenswrapper[4682]: I1210 11:11:10.263533 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cloudkitty-db-sync-cdf59"] Dec 10 11:11:10 crc kubenswrapper[4682]: E1210 11:11:10.344316 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:11:10 crc kubenswrapper[4682]: E1210 11:11:10.344368 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:11:10 crc kubenswrapper[4682]: E1210 11:11:10.344536 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,Seccomp
Profile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:11:10 crc kubenswrapper[4682]: E1210 11:11:10.346144 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:11:10 crc kubenswrapper[4682]: I1210 11:11:10.398384 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="997c9b87-b796-40a3-a9c9-cf1e2a3abc4d" path="/var/lib/kubelet/pods/997c9b87-b796-40a3-a9c9-cf1e2a3abc4d/volumes" Dec 10 11:11:11 crc kubenswrapper[4682]: I1210 11:11:11.023572 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cloudkitty-db-sync-cdf59" event={"ID":"105b676e-6612-406e-984b-86afbf8ede6c","Type":"ContainerStarted","Data":"4a6b9c7cf8c96b76749d0959f71bb0006bd844bf0996046da5d9354ad9efa26c"} Dec 10 11:11:11 crc kubenswrapper[4682]: E1210 11:11:11.025406 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:11:11 crc kubenswrapper[4682]: I1210 11:11:11.185385 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:11:11 crc kubenswrapper[4682]: I1210 11:11:11.185688 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="ceilometer-central-agent" containerID="cri-o://f81a8e442f780c8d84f189b3b7a1ccfb204da7ebad172398273e73fc598eb533" gracePeriod=30 Dec 10 11:11:11 crc kubenswrapper[4682]: I1210 11:11:11.185790 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="ceilometer-notification-agent" containerID="cri-o://0ac738c65821df52285af23cd35d5ba1806fa6635999d4b870563761d9370119" gracePeriod=30 Dec 10 11:11:11 crc kubenswrapper[4682]: I1210 11:11:11.185790 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="sg-core" containerID="cri-o://f8cbb182c7db499799396a4d869ffaf89e95422a86bb24b722f57f155a176374" gracePeriod=30 Dec 10 11:11:11 crc kubenswrapper[4682]: I1210 11:11:11.185807 4682 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="proxy-httpd" containerID="cri-o://7686d84ae9f2cc137d7f67c21fd97c1140ece9bb6b763b63a1237301857f3a80" gracePeriod=30 Dec 10 11:11:11 crc kubenswrapper[4682]: I1210 11:11:11.681501 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:11:12 crc kubenswrapper[4682]: I1210 11:11:12.035088 4682 generic.go:334] "Generic (PLEG): container finished" podID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerID="7686d84ae9f2cc137d7f67c21fd97c1140ece9bb6b763b63a1237301857f3a80" exitCode=0 Dec 10 11:11:12 crc kubenswrapper[4682]: I1210 11:11:12.035390 4682 generic.go:334] "Generic (PLEG): container finished" podID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerID="f8cbb182c7db499799396a4d869ffaf89e95422a86bb24b722f57f155a176374" exitCode=2 Dec 10 11:11:12 crc kubenswrapper[4682]: I1210 11:11:12.035169 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4f238a47-7afd-494d-80d2-1eed26cdb0cc","Type":"ContainerDied","Data":"7686d84ae9f2cc137d7f67c21fd97c1140ece9bb6b763b63a1237301857f3a80"} Dec 10 11:11:12 crc kubenswrapper[4682]: I1210 11:11:12.035437 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4f238a47-7afd-494d-80d2-1eed26cdb0cc","Type":"ContainerDied","Data":"f8cbb182c7db499799396a4d869ffaf89e95422a86bb24b722f57f155a176374"} Dec 10 11:11:12 crc kubenswrapper[4682]: I1210 11:11:12.035453 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4f238a47-7afd-494d-80d2-1eed26cdb0cc","Type":"ContainerDied","Data":"f81a8e442f780c8d84f189b3b7a1ccfb204da7ebad172398273e73fc598eb533"} Dec 10 11:11:12 crc kubenswrapper[4682]: I1210 11:11:12.035401 4682 generic.go:334] "Generic (PLEG): container finished" podID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerID="f81a8e442f780c8d84f189b3b7a1ccfb204da7ebad172398273e73fc598eb533" exitCode=0 Dec 10 11:11:12 crc kubenswrapper[4682]: E1210 11:11:12.037043 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:11:12 crc kubenswrapper[4682]: I1210 11:11:12.488112 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.049703 4682 generic.go:334] "Generic (PLEG): container finished" podID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerID="0ac738c65821df52285af23cd35d5ba1806fa6635999d4b870563761d9370119" exitCode=0 Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.049869 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4f238a47-7afd-494d-80d2-1eed26cdb0cc","Type":"ContainerDied","Data":"0ac738c65821df52285af23cd35d5ba1806fa6635999d4b870563761d9370119"} Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.451136 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.553105 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-config-data\") pod \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.553225 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-sg-core-conf-yaml\") pod \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.553332 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-ceilometer-tls-certs\") pod \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.553466 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4f238a47-7afd-494d-80d2-1eed26cdb0cc-log-httpd\") pod \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.553556 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtrnt\" (UniqueName: \"kubernetes.io/projected/4f238a47-7afd-494d-80d2-1eed26cdb0cc-kube-api-access-xtrnt\") pod \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.553643 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-combined-ca-bundle\") pod \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.553706 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-scripts\") pod \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.553728 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4f238a47-7afd-494d-80d2-1eed26cdb0cc-run-httpd\") pod \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\" (UID: \"4f238a47-7afd-494d-80d2-1eed26cdb0cc\") " Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.554023 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f238a47-7afd-494d-80d2-1eed26cdb0cc-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4f238a47-7afd-494d-80d2-1eed26cdb0cc" (UID: "4f238a47-7afd-494d-80d2-1eed26cdb0cc"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.554327 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f238a47-7afd-494d-80d2-1eed26cdb0cc-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4f238a47-7afd-494d-80d2-1eed26cdb0cc" (UID: "4f238a47-7afd-494d-80d2-1eed26cdb0cc"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.556136 4682 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4f238a47-7afd-494d-80d2-1eed26cdb0cc-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.556264 4682 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4f238a47-7afd-494d-80d2-1eed26cdb0cc-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.560246 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-scripts" (OuterVolumeSpecName: "scripts") pod "4f238a47-7afd-494d-80d2-1eed26cdb0cc" (UID: "4f238a47-7afd-494d-80d2-1eed26cdb0cc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.562779 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f238a47-7afd-494d-80d2-1eed26cdb0cc-kube-api-access-xtrnt" (OuterVolumeSpecName: "kube-api-access-xtrnt") pod "4f238a47-7afd-494d-80d2-1eed26cdb0cc" (UID: "4f238a47-7afd-494d-80d2-1eed26cdb0cc"). InnerVolumeSpecName "kube-api-access-xtrnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.634244 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4f238a47-7afd-494d-80d2-1eed26cdb0cc" (UID: "4f238a47-7afd-494d-80d2-1eed26cdb0cc"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.660356 4682 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.660391 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtrnt\" (UniqueName: \"kubernetes.io/projected/4f238a47-7afd-494d-80d2-1eed26cdb0cc-kube-api-access-xtrnt\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.660412 4682 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.667714 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "4f238a47-7afd-494d-80d2-1eed26cdb0cc" (UID: "4f238a47-7afd-494d-80d2-1eed26cdb0cc"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.762321 4682 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.778137 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4f238a47-7afd-494d-80d2-1eed26cdb0cc" (UID: "4f238a47-7afd-494d-80d2-1eed26cdb0cc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.785263 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-config-data" (OuterVolumeSpecName: "config-data") pod "4f238a47-7afd-494d-80d2-1eed26cdb0cc" (UID: "4f238a47-7afd-494d-80d2-1eed26cdb0cc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.864136 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:13 crc kubenswrapper[4682]: I1210 11:11:13.864174 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f238a47-7afd-494d-80d2-1eed26cdb0cc-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.078605 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4f238a47-7afd-494d-80d2-1eed26cdb0cc","Type":"ContainerDied","Data":"c422b6bc1f0850356395720a5366acc89daab4fa3a8271b7ede4a8e8312c3781"} Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.078662 4682 scope.go:117] "RemoveContainer" containerID="7686d84ae9f2cc137d7f67c21fd97c1140ece9bb6b763b63a1237301857f3a80" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.078663 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.106583 4682 scope.go:117] "RemoveContainer" containerID="f8cbb182c7db499799396a4d869ffaf89e95422a86bb24b722f57f155a176374" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.126506 4682 scope.go:117] "RemoveContainer" containerID="0ac738c65821df52285af23cd35d5ba1806fa6635999d4b870563761d9370119" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.140299 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.157371 4682 scope.go:117] "RemoveContainer" containerID="f81a8e442f780c8d84f189b3b7a1ccfb204da7ebad172398273e73fc598eb533" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.162006 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.174745 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:11:14 crc kubenswrapper[4682]: E1210 11:11:14.175134 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="ceilometer-central-agent" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.175151 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="ceilometer-central-agent" Dec 10 11:11:14 crc kubenswrapper[4682]: E1210 11:11:14.175166 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="sg-core" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.175173 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="sg-core" Dec 10 11:11:14 crc kubenswrapper[4682]: E1210 11:11:14.175180 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="ceilometer-notification-agent" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.175204 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="ceilometer-notification-agent" Dec 10 11:11:14 crc kubenswrapper[4682]: E1210 11:11:14.175232 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="proxy-httpd" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.175237 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="proxy-httpd" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.175444 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="ceilometer-notification-agent" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.175460 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="proxy-httpd" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.175486 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="sg-core" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.175503 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" containerName="ceilometer-central-agent" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.177329 4682 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.182668 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.182897 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.183051 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.208188 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.277110 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/58163ec6-c74c-4db2-aad7-c5f598a75856-log-httpd\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.277192 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.277246 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.277287 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.277426 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/58163ec6-c74c-4db2-aad7-c5f598a75856-run-httpd\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.277620 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-config-data\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.277824 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9bz8\" (UniqueName: \"kubernetes.io/projected/58163ec6-c74c-4db2-aad7-c5f598a75856-kube-api-access-l9bz8\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.277925 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-scripts\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.379446 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.384968 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.385637 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.385718 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/58163ec6-c74c-4db2-aad7-c5f598a75856-run-httpd\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.385812 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-config-data\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.385926 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9bz8\" (UniqueName: \"kubernetes.io/projected/58163ec6-c74c-4db2-aad7-c5f598a75856-kube-api-access-l9bz8\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.385992 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-scripts\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.386025 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/58163ec6-c74c-4db2-aad7-c5f598a75856-log-httpd\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.386038 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.386069 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/58163ec6-c74c-4db2-aad7-c5f598a75856-run-httpd\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.386288 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/58163ec6-c74c-4db2-aad7-c5f598a75856-log-httpd\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.389027 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.389760 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.390398 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-scripts\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.401059 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f238a47-7afd-494d-80d2-1eed26cdb0cc" path="/var/lib/kubelet/pods/4f238a47-7afd-494d-80d2-1eed26cdb0cc/volumes" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.413592 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58163ec6-c74c-4db2-aad7-c5f598a75856-config-data\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.421223 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9bz8\" (UniqueName: \"kubernetes.io/projected/58163ec6-c74c-4db2-aad7-c5f598a75856-kube-api-access-l9bz8\") pod \"ceilometer-0\" (UID: \"58163ec6-c74c-4db2-aad7-c5f598a75856\") " pod="openstack/ceilometer-0" Dec 10 11:11:14 crc kubenswrapper[4682]: I1210 11:11:14.503434 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 11:11:15 crc kubenswrapper[4682]: I1210 11:11:15.037114 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 11:11:15 crc kubenswrapper[4682]: I1210 11:11:15.091645 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"58163ec6-c74c-4db2-aad7-c5f598a75856","Type":"ContainerStarted","Data":"72e2a37500d1de1c7c94b9125a4d1142fcf0d6bbd54f87cc60b6c44753262c6b"} Dec 10 11:11:15 crc kubenswrapper[4682]: E1210 11:11:15.129681 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:11:15 crc kubenswrapper[4682]: E1210 11:11:15.129895 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:11:15 crc kubenswrapper[4682]: E1210 11:11:15.130079 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:11:16 crc kubenswrapper[4682]: I1210 11:11:16.521373 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="7362d622-686c-48e5-b0de-562fae10bc35" containerName="rabbitmq" containerID="cri-o://91fdcc557b4d22e64f4b9de115191e912904338fe6401293a35d7d82e5808b58" gracePeriod=604796 Dec 10 11:11:17 crc kubenswrapper[4682]: I1210 11:11:17.113228 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"58163ec6-c74c-4db2-aad7-c5f598a75856","Type":"ContainerStarted","Data":"9653b28586454b5452821e4b65343b268420c0c34a3ebabf2784fe2651b95588"} Dec 10 11:11:17 crc kubenswrapper[4682]: I1210 11:11:17.113623 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"58163ec6-c74c-4db2-aad7-c5f598a75856","Type":"ContainerStarted","Data":"132264c4174055706660a623e65f36b1513d2f746417b341fdc749faa3b77bc8"} Dec 10 11:11:17 crc kubenswrapper[4682]: I1210 11:11:17.156840 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="c211ac37-0b53-466f-ad83-7062f681c32b" containerName="rabbitmq" containerID="cri-o://1310e0bb88be9b6509453f30f4cb31d52ffaf605a9ef457f9cdcab64c30a2c8f" gracePeriod=604796 Dec 10 11:11:19 crc kubenswrapper[4682]: E1210 11:11:19.217268 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:11:20 crc kubenswrapper[4682]: I1210 11:11:20.153754 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"58163ec6-c74c-4db2-aad7-c5f598a75856","Type":"ContainerStarted","Data":"f59d0cb97f1c4abbb1fcfa98bb577520302db9dc99dec29be2839e8e2a8dd80b"} Dec 10 11:11:20 crc kubenswrapper[4682]: I1210 11:11:20.154119 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 11:11:20 crc kubenswrapper[4682]: E1210 11:11:20.155614 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:11:20 crc kubenswrapper[4682]: I1210 11:11:20.436248 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="7362d622-686c-48e5-b0de-562fae10bc35" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Dec 10 11:11:21 crc kubenswrapper[4682]: E1210 11:11:21.164118 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:11:21 crc kubenswrapper[4682]: I1210 11:11:21.217998 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="c211ac37-0b53-466f-ad83-7062f681c32b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.107:5671: connect: connection refused" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.187172 4682 generic.go:334] "Generic (PLEG): container finished" podID="7362d622-686c-48e5-b0de-562fae10bc35" containerID="91fdcc557b4d22e64f4b9de115191e912904338fe6401293a35d7d82e5808b58" exitCode=0 Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.187299 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7362d622-686c-48e5-b0de-562fae10bc35","Type":"ContainerDied","Data":"91fdcc557b4d22e64f4b9de115191e912904338fe6401293a35d7d82e5808b58"} Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.187734 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7362d622-686c-48e5-b0de-562fae10bc35","Type":"ContainerDied","Data":"e1e5b2899b676f626bc1c8e9b9535c1807f07453d8f6cebfde6ef90716c4ac13"} Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.187751 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1e5b2899b676f626bc1c8e9b9535c1807f07453d8f6cebfde6ef90716c4ac13" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.199132 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.288204 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-confd\") pod \"7362d622-686c-48e5-b0de-562fae10bc35\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.288254 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-config-data\") pod \"7362d622-686c-48e5-b0de-562fae10bc35\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.288332 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-server-conf\") pod \"7362d622-686c-48e5-b0de-562fae10bc35\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.288374 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-plugins-conf\") pod \"7362d622-686c-48e5-b0de-562fae10bc35\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.288446 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-erlang-cookie\") pod \"7362d622-686c-48e5-b0de-562fae10bc35\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.288987 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\") pod \"7362d622-686c-48e5-b0de-562fae10bc35\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.289096 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2272\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-kube-api-access-l2272\") pod \"7362d622-686c-48e5-b0de-562fae10bc35\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.289162 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-tls\") pod \"7362d622-686c-48e5-b0de-562fae10bc35\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.289205 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7362d622-686c-48e5-b0de-562fae10bc35-pod-info\") pod \"7362d622-686c-48e5-b0de-562fae10bc35\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.289233 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7362d622-686c-48e5-b0de-562fae10bc35-erlang-cookie-secret\") pod 
\"7362d622-686c-48e5-b0de-562fae10bc35\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.289279 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-plugins\") pod \"7362d622-686c-48e5-b0de-562fae10bc35\" (UID: \"7362d622-686c-48e5-b0de-562fae10bc35\") " Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.290031 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "7362d622-686c-48e5-b0de-562fae10bc35" (UID: "7362d622-686c-48e5-b0de-562fae10bc35"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.290418 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "7362d622-686c-48e5-b0de-562fae10bc35" (UID: "7362d622-686c-48e5-b0de-562fae10bc35"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.290552 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "7362d622-686c-48e5-b0de-562fae10bc35" (UID: "7362d622-686c-48e5-b0de-562fae10bc35"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.311161 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/7362d622-686c-48e5-b0de-562fae10bc35-pod-info" (OuterVolumeSpecName: "pod-info") pod "7362d622-686c-48e5-b0de-562fae10bc35" (UID: "7362d622-686c-48e5-b0de-562fae10bc35"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.312363 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "7362d622-686c-48e5-b0de-562fae10bc35" (UID: "7362d622-686c-48e5-b0de-562fae10bc35"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.313809 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7362d622-686c-48e5-b0de-562fae10bc35-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "7362d622-686c-48e5-b0de-562fae10bc35" (UID: "7362d622-686c-48e5-b0de-562fae10bc35"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.318038 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-kube-api-access-l2272" (OuterVolumeSpecName: "kube-api-access-l2272") pod "7362d622-686c-48e5-b0de-562fae10bc35" (UID: "7362d622-686c-48e5-b0de-562fae10bc35"). InnerVolumeSpecName "kube-api-access-l2272". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.333366 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00" (OuterVolumeSpecName: "persistence") pod "7362d622-686c-48e5-b0de-562fae10bc35" (UID: "7362d622-686c-48e5-b0de-562fae10bc35"). InnerVolumeSpecName "pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.370130 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-config-data" (OuterVolumeSpecName: "config-data") pod "7362d622-686c-48e5-b0de-562fae10bc35" (UID: "7362d622-686c-48e5-b0de-562fae10bc35"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.395873 4682 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.396417 4682 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\") on node \"crc\" " Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.396549 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2272\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-kube-api-access-l2272\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.396629 4682 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.396697 4682 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7362d622-686c-48e5-b0de-562fae10bc35-pod-info\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.396761 4682 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7362d622-686c-48e5-b0de-562fae10bc35-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.396822 4682 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.396884 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.397268 4682 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.456786 4682 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-server-conf" (OuterVolumeSpecName: "server-conf") pod "7362d622-686c-48e5-b0de-562fae10bc35" (UID: "7362d622-686c-48e5-b0de-562fae10bc35"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.470846 4682 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.471035 4682 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00") on node "crc" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.499732 4682 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7362d622-686c-48e5-b0de-562fae10bc35-server-conf\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.500095 4682 reconciler_common.go:293] "Volume detached for volume \"pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.509860 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "7362d622-686c-48e5-b0de-562fae10bc35" (UID: "7362d622-686c-48e5-b0de-562fae10bc35"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:11:23 crc kubenswrapper[4682]: I1210 11:11:23.602138 4682 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7362d622-686c-48e5-b0de-562fae10bc35-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.076148 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.129056 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c211ac37-0b53-466f-ad83-7062f681c32b-erlang-cookie-secret\") pod \"c211ac37-0b53-466f-ad83-7062f681c32b\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.129586 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\") pod \"c211ac37-0b53-466f-ad83-7062f681c32b\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.129699 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-confd\") pod \"c211ac37-0b53-466f-ad83-7062f681c32b\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.129721 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-765cp\" (UniqueName: \"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-kube-api-access-765cp\") pod \"c211ac37-0b53-466f-ad83-7062f681c32b\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.129748 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-tls\") pod \"c211ac37-0b53-466f-ad83-7062f681c32b\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.129805 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-plugins-conf\") pod \"c211ac37-0b53-466f-ad83-7062f681c32b\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.129844 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-config-data\") pod \"c211ac37-0b53-466f-ad83-7062f681c32b\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.129874 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-erlang-cookie\") pod \"c211ac37-0b53-466f-ad83-7062f681c32b\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.129932 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-plugins\") pod \"c211ac37-0b53-466f-ad83-7062f681c32b\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.129965 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c211ac37-0b53-466f-ad83-7062f681c32b-pod-info\") pod 
\"c211ac37-0b53-466f-ad83-7062f681c32b\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.130000 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-server-conf\") pod \"c211ac37-0b53-466f-ad83-7062f681c32b\" (UID: \"c211ac37-0b53-466f-ad83-7062f681c32b\") " Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.135759 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c211ac37-0b53-466f-ad83-7062f681c32b-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "c211ac37-0b53-466f-ad83-7062f681c32b" (UID: "c211ac37-0b53-466f-ad83-7062f681c32b"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.136828 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "c211ac37-0b53-466f-ad83-7062f681c32b" (UID: "c211ac37-0b53-466f-ad83-7062f681c32b"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.137588 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "c211ac37-0b53-466f-ad83-7062f681c32b" (UID: "c211ac37-0b53-466f-ad83-7062f681c32b"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.140371 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "c211ac37-0b53-466f-ad83-7062f681c32b" (UID: "c211ac37-0b53-466f-ad83-7062f681c32b"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.140631 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "c211ac37-0b53-466f-ad83-7062f681c32b" (UID: "c211ac37-0b53-466f-ad83-7062f681c32b"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.142376 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/c211ac37-0b53-466f-ad83-7062f681c32b-pod-info" (OuterVolumeSpecName: "pod-info") pod "c211ac37-0b53-466f-ad83-7062f681c32b" (UID: "c211ac37-0b53-466f-ad83-7062f681c32b"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.149710 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-kube-api-access-765cp" (OuterVolumeSpecName: "kube-api-access-765cp") pod "c211ac37-0b53-466f-ad83-7062f681c32b" (UID: "c211ac37-0b53-466f-ad83-7062f681c32b"). InnerVolumeSpecName "kube-api-access-765cp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.160789 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e840cb9-4f54-49e5-80da-54756541d8a2" (OuterVolumeSpecName: "persistence") pod "c211ac37-0b53-466f-ad83-7062f681c32b" (UID: "c211ac37-0b53-466f-ad83-7062f681c32b"). InnerVolumeSpecName "pvc-7e840cb9-4f54-49e5-80da-54756541d8a2". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.176647 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-config-data" (OuterVolumeSpecName: "config-data") pod "c211ac37-0b53-466f-ad83-7062f681c32b" (UID: "c211ac37-0b53-466f-ad83-7062f681c32b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.205288 4682 generic.go:334] "Generic (PLEG): container finished" podID="c211ac37-0b53-466f-ad83-7062f681c32b" containerID="1310e0bb88be9b6509453f30f4cb31d52ffaf605a9ef457f9cdcab64c30a2c8f" exitCode=0 Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.205401 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.206386 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c211ac37-0b53-466f-ad83-7062f681c32b","Type":"ContainerDied","Data":"1310e0bb88be9b6509453f30f4cb31d52ffaf605a9ef457f9cdcab64c30a2c8f"} Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.206508 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c211ac37-0b53-466f-ad83-7062f681c32b","Type":"ContainerDied","Data":"39eaab8e7c09a0655115f029b9346c4ecdec0aefbf80f7b00f0fce3e382dac6b"} Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.206545 4682 scope.go:117] "RemoveContainer" containerID="1310e0bb88be9b6509453f30f4cb31d52ffaf605a9ef457f9cdcab64c30a2c8f" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.206911 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.233662 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-765cp\" (UniqueName: \"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-kube-api-access-765cp\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.233704 4682 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.233714 4682 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.233724 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.233737 4682 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.233749 4682 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.233760 4682 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c211ac37-0b53-466f-ad83-7062f681c32b-pod-info\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.233772 4682 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c211ac37-0b53-466f-ad83-7062f681c32b-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.233803 4682 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\") on node \"crc\" " Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.275627 4682 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.275804 4682 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-7e840cb9-4f54-49e5-80da-54756541d8a2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e840cb9-4f54-49e5-80da-54756541d8a2") on node "crc" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.278209 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-server-conf" (OuterVolumeSpecName: "server-conf") pod "c211ac37-0b53-466f-ad83-7062f681c32b" (UID: "c211ac37-0b53-466f-ad83-7062f681c32b"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.306111 4682 scope.go:117] "RemoveContainer" containerID="ffbe3adca9c0c62209b7671ab439ea1eb3795294266508f5997585d6ef992d4b" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.337172 4682 reconciler_common.go:293] "Volume detached for volume \"pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.337214 4682 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c211ac37-0b53-466f-ad83-7062f681c32b-server-conf\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.337311 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.363441 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.363560 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:11:24 crc kubenswrapper[4682]: E1210 11:11:24.363963 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7362d622-686c-48e5-b0de-562fae10bc35" containerName="setup-container" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.363975 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="7362d622-686c-48e5-b0de-562fae10bc35" containerName="setup-container" Dec 10 11:11:24 crc kubenswrapper[4682]: E1210 11:11:24.364000 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c211ac37-0b53-466f-ad83-7062f681c32b" containerName="setup-container" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.364088 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c211ac37-0b53-466f-ad83-7062f681c32b" containerName="setup-container" Dec 10 11:11:24 crc kubenswrapper[4682]: E1210 11:11:24.364124 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7362d622-686c-48e5-b0de-562fae10bc35" containerName="rabbitmq" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.364131 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="7362d622-686c-48e5-b0de-562fae10bc35" containerName="rabbitmq" Dec 10 11:11:24 crc kubenswrapper[4682]: E1210 11:11:24.364169 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c211ac37-0b53-466f-ad83-7062f681c32b" containerName="rabbitmq" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.364175 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c211ac37-0b53-466f-ad83-7062f681c32b" containerName="rabbitmq" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.364357 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="7362d622-686c-48e5-b0de-562fae10bc35" containerName="rabbitmq" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.364368 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c211ac37-0b53-466f-ad83-7062f681c32b" containerName="rabbitmq" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.365415 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.369448 4682 scope.go:117] "RemoveContainer" containerID="1310e0bb88be9b6509453f30f4cb31d52ffaf605a9ef457f9cdcab64c30a2c8f" Dec 10 11:11:24 crc kubenswrapper[4682]: E1210 11:11:24.369991 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1310e0bb88be9b6509453f30f4cb31d52ffaf605a9ef457f9cdcab64c30a2c8f\": container with ID starting with 1310e0bb88be9b6509453f30f4cb31d52ffaf605a9ef457f9cdcab64c30a2c8f not found: ID does not exist" containerID="1310e0bb88be9b6509453f30f4cb31d52ffaf605a9ef457f9cdcab64c30a2c8f" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.370019 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1310e0bb88be9b6509453f30f4cb31d52ffaf605a9ef457f9cdcab64c30a2c8f"} err="failed to get container status \"1310e0bb88be9b6509453f30f4cb31d52ffaf605a9ef457f9cdcab64c30a2c8f\": rpc error: code = NotFound desc = could not find container \"1310e0bb88be9b6509453f30f4cb31d52ffaf605a9ef457f9cdcab64c30a2c8f\": container with ID starting with 1310e0bb88be9b6509453f30f4cb31d52ffaf605a9ef457f9cdcab64c30a2c8f not found: ID does not exist" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.370039 4682 scope.go:117] "RemoveContainer" containerID="ffbe3adca9c0c62209b7671ab439ea1eb3795294266508f5997585d6ef992d4b" Dec 10 11:11:24 crc kubenswrapper[4682]: E1210 11:11:24.372097 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffbe3adca9c0c62209b7671ab439ea1eb3795294266508f5997585d6ef992d4b\": container with ID starting with ffbe3adca9c0c62209b7671ab439ea1eb3795294266508f5997585d6ef992d4b not found: ID does not exist" containerID="ffbe3adca9c0c62209b7671ab439ea1eb3795294266508f5997585d6ef992d4b" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.372132 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffbe3adca9c0c62209b7671ab439ea1eb3795294266508f5997585d6ef992d4b"} err="failed to get container status \"ffbe3adca9c0c62209b7671ab439ea1eb3795294266508f5997585d6ef992d4b\": rpc error: code = NotFound desc = could not find container \"ffbe3adca9c0c62209b7671ab439ea1eb3795294266508f5997585d6ef992d4b\": container with ID starting with ffbe3adca9c0c62209b7671ab439ea1eb3795294266508f5997585d6ef992d4b not found: ID does not exist" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.372593 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.372809 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.373997 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-zrlpn" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.374165 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "c211ac37-0b53-466f-ad83-7062f681c32b" (UID: "c211ac37-0b53-466f-ad83-7062f681c32b"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.374237 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.374812 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.374957 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.374594 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.406133 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7362d622-686c-48e5-b0de-562fae10bc35" path="/var/lib/kubelet/pods/7362d622-686c-48e5-b0de-562fae10bc35/volumes" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.407658 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.438485 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.438545 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.438600 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.438622 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.438655 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.438679 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ct7m8\" (UniqueName: \"kubernetes.io/projected/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-kube-api-access-ct7m8\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.438705 4682 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.438747 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.438784 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-config-data\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.438821 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.438836 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.438918 4682 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c211ac37-0b53-466f-ad83-7062f681c32b-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:24 crc kubenswrapper[4682]: E1210 11:11:24.539383 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:11:24 crc kubenswrapper[4682]: E1210 11:11:24.539802 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:11:24 crc kubenswrapper[4682]: E1210 11:11:24.539953 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.540511 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.540575 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.540609 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.540630 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.540663 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.540682 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ct7m8\" (UniqueName: \"kubernetes.io/projected/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-kube-api-access-ct7m8\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.540710 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.540739 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.540777 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-config-data\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.540809 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.540827 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: E1210 11:11:24.541177 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.541232 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.541920 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-config-data\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.542142 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.542422 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.544031 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.545020 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.545068 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ae9f9a2cec2d07eb6ee17a3db3aa6e274ca33ba3628d7fecb8aaa98a76caa599/globalmount\"" pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.546611 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.548416 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.554650 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.554763 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.557513 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.565305 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ct7m8\" (UniqueName: \"kubernetes.io/projected/ce19556c-31cc-4e0a-b092-c5cfb2cf815a-kube-api-access-ct7m8\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.571073 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.583385 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.585616 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.589907 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.590033 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.590104 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.590372 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.590508 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.590593 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-87b4g" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.590674 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.597183 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.642250 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b2bb3f39-3fa9-42c1-abea-06fd2630a819-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.642318 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b2bb3f39-3fa9-42c1-abea-06fd2630a819-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.642422 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzpjv\" (UniqueName: \"kubernetes.io/projected/b2bb3f39-3fa9-42c1-abea-06fd2630a819-kube-api-access-lzpjv\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.642449 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b2bb3f39-3fa9-42c1-abea-06fd2630a819-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.642507 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b2bb3f39-3fa9-42c1-abea-06fd2630a819-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.642587 4682 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b2bb3f39-3fa9-42c1-abea-06fd2630a819-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.642645 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.642678 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b2bb3f39-3fa9-42c1-abea-06fd2630a819-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.642739 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b2bb3f39-3fa9-42c1-abea-06fd2630a819-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.642829 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b2bb3f39-3fa9-42c1-abea-06fd2630a819-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.642864 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b2bb3f39-3fa9-42c1-abea-06fd2630a819-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.652411 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b01950fd-abf6-4e58-af84-70ddc1bc0c00\") pod \"rabbitmq-server-0\" (UID: \"ce19556c-31cc-4e0a-b092-c5cfb2cf815a\") " pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.703377 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.745061 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.745123 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b2bb3f39-3fa9-42c1-abea-06fd2630a819-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.745191 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b2bb3f39-3fa9-42c1-abea-06fd2630a819-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.745270 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b2bb3f39-3fa9-42c1-abea-06fd2630a819-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.745300 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b2bb3f39-3fa9-42c1-abea-06fd2630a819-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.745335 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b2bb3f39-3fa9-42c1-abea-06fd2630a819-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.745366 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b2bb3f39-3fa9-42c1-abea-06fd2630a819-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.745420 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzpjv\" (UniqueName: \"kubernetes.io/projected/b2bb3f39-3fa9-42c1-abea-06fd2630a819-kube-api-access-lzpjv\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.745443 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b2bb3f39-3fa9-42c1-abea-06fd2630a819-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 
crc kubenswrapper[4682]: I1210 11:11:24.745493 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b2bb3f39-3fa9-42c1-abea-06fd2630a819-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.745582 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b2bb3f39-3fa9-42c1-abea-06fd2630a819-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.757283 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b2bb3f39-3fa9-42c1-abea-06fd2630a819-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.758211 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b2bb3f39-3fa9-42c1-abea-06fd2630a819-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.760517 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b2bb3f39-3fa9-42c1-abea-06fd2630a819-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.761224 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b2bb3f39-3fa9-42c1-abea-06fd2630a819-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.763848 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b2bb3f39-3fa9-42c1-abea-06fd2630a819-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.764358 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b2bb3f39-3fa9-42c1-abea-06fd2630a819-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.769249 4682 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.769284 4682 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9175dee665a5343fd54b62099e2589ce8aeecb32571dc715a22448f6bd4b0462/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.788849 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b2bb3f39-3fa9-42c1-abea-06fd2630a819-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.793711 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b2bb3f39-3fa9-42c1-abea-06fd2630a819-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.798259 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b2bb3f39-3fa9-42c1-abea-06fd2630a819-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.843026 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzpjv\" (UniqueName: \"kubernetes.io/projected/b2bb3f39-3fa9-42c1-abea-06fd2630a819-kube-api-access-lzpjv\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.896317 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e840cb9-4f54-49e5-80da-54756541d8a2\") pod \"rabbitmq-cell1-server-0\" (UID: \"b2bb3f39-3fa9-42c1-abea-06fd2630a819\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:24 crc kubenswrapper[4682]: I1210 11:11:24.907550 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:11:25 crc kubenswrapper[4682]: I1210 11:11:25.271809 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 11:11:25 crc kubenswrapper[4682]: I1210 11:11:25.399667 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 11:11:25 crc kubenswrapper[4682]: W1210 11:11:25.403015 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb2bb3f39_3fa9_42c1_abea_06fd2630a819.slice/crio-386689199ffe5436dfe61aaaac4cbbd4d5632a6b46ee46e2db825a66d33f7bf5 WatchSource:0}: Error finding container 386689199ffe5436dfe61aaaac4cbbd4d5632a6b46ee46e2db825a66d33f7bf5: Status 404 returned error can't find the container with id 386689199ffe5436dfe61aaaac4cbbd4d5632a6b46ee46e2db825a66d33f7bf5 Dec 10 11:11:26 crc kubenswrapper[4682]: I1210 11:11:26.233333 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b2bb3f39-3fa9-42c1-abea-06fd2630a819","Type":"ContainerStarted","Data":"386689199ffe5436dfe61aaaac4cbbd4d5632a6b46ee46e2db825a66d33f7bf5"} Dec 10 11:11:26 crc kubenswrapper[4682]: I1210 11:11:26.238245 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ce19556c-31cc-4e0a-b092-c5cfb2cf815a","Type":"ContainerStarted","Data":"5857fd7a794f75d0c779f507c3cc9911d6daea05f658179d02ba2d0055642724"} Dec 10 11:11:26 crc kubenswrapper[4682]: I1210 11:11:26.393728 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c211ac37-0b53-466f-ad83-7062f681c32b" path="/var/lib/kubelet/pods/c211ac37-0b53-466f-ad83-7062f681c32b/volumes" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.248416 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ce19556c-31cc-4e0a-b092-c5cfb2cf815a","Type":"ContainerStarted","Data":"d2229b77e0da9da5c22d3309bfabfdb12cd939789521796d8a8aa7fb99b401eb"} Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.250019 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b2bb3f39-3fa9-42c1-abea-06fd2630a819","Type":"ContainerStarted","Data":"27be6649e4a0ecfbac5fc2b5d07d378af1c62f352ac5a42260c1fc25b156effb"} Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.504072 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-595979776c-jlkf8"] Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.507249 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.509141 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.543560 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-595979776c-jlkf8"] Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.616563 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-openstack-edpm-ipam\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.616658 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-dns-svc\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.616834 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-config\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.617044 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bh69m\" (UniqueName: \"kubernetes.io/projected/528a3098-5c3b-436e-80a9-319169913494-kube-api-access-bh69m\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.617299 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-ovsdbserver-sb\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.617343 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-dns-swift-storage-0\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.617378 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-ovsdbserver-nb\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.718989 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bh69m\" (UniqueName: \"kubernetes.io/projected/528a3098-5c3b-436e-80a9-319169913494-kube-api-access-bh69m\") pod 
\"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.719050 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-ovsdbserver-sb\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.719093 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-dns-swift-storage-0\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.719116 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-ovsdbserver-nb\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.719200 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-openstack-edpm-ipam\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.719252 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-dns-svc\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.719352 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-config\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.720112 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-ovsdbserver-sb\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.720287 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-openstack-edpm-ipam\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.720345 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-dns-swift-storage-0\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: 
\"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.720633 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-config\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.720637 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-dns-svc\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.720848 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-ovsdbserver-nb\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.739853 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bh69m\" (UniqueName: \"kubernetes.io/projected/528a3098-5c3b-436e-80a9-319169913494-kube-api-access-bh69m\") pod \"dnsmasq-dns-595979776c-jlkf8\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:27 crc kubenswrapper[4682]: I1210 11:11:27.829694 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:28 crc kubenswrapper[4682]: W1210 11:11:28.298249 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod528a3098_5c3b_436e_80a9_319169913494.slice/crio-db5e6454b861460f684cf4ba9f566161eda7f644babc1e2385e30954d2d5d1ba WatchSource:0}: Error finding container db5e6454b861460f684cf4ba9f566161eda7f644babc1e2385e30954d2d5d1ba: Status 404 returned error can't find the container with id db5e6454b861460f684cf4ba9f566161eda7f644babc1e2385e30954d2d5d1ba Dec 10 11:11:28 crc kubenswrapper[4682]: I1210 11:11:28.299535 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-595979776c-jlkf8"] Dec 10 11:11:29 crc kubenswrapper[4682]: I1210 11:11:29.271072 4682 generic.go:334] "Generic (PLEG): container finished" podID="528a3098-5c3b-436e-80a9-319169913494" containerID="36ff4f0d42e829c5c324e042616a2185b903c3795f9a76fb93684def0e1c7f26" exitCode=0 Dec 10 11:11:29 crc kubenswrapper[4682]: I1210 11:11:29.271249 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-595979776c-jlkf8" event={"ID":"528a3098-5c3b-436e-80a9-319169913494","Type":"ContainerDied","Data":"36ff4f0d42e829c5c324e042616a2185b903c3795f9a76fb93684def0e1c7f26"} Dec 10 11:11:29 crc kubenswrapper[4682]: I1210 11:11:29.271636 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-595979776c-jlkf8" event={"ID":"528a3098-5c3b-436e-80a9-319169913494","Type":"ContainerStarted","Data":"db5e6454b861460f684cf4ba9f566161eda7f644babc1e2385e30954d2d5d1ba"} Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.285258 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-595979776c-jlkf8" 
event={"ID":"528a3098-5c3b-436e-80a9-319169913494","Type":"ContainerStarted","Data":"3e5a925659cd626b7659b4d99d29fc4e07b4c2d9585fad4802cbd6b06ecaeca4"} Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.285862 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.312228 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-595979776c-jlkf8" podStartSLOduration=3.312206031 podStartE2EDuration="3.312206031s" podCreationTimestamp="2025-12-10 11:11:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:11:30.306863277 +0000 UTC m=+1570.627074027" watchObservedRunningTime="2025-12-10 11:11:30.312206031 +0000 UTC m=+1570.632416781" Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.704146 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xgbrj"] Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.706956 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.725079 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xgbrj"] Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.795332 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55pvf\" (UniqueName: \"kubernetes.io/projected/04f85ab1-59e1-486f-9492-6a7d3848eac3-kube-api-access-55pvf\") pod \"certified-operators-xgbrj\" (UID: \"04f85ab1-59e1-486f-9492-6a7d3848eac3\") " pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.795656 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04f85ab1-59e1-486f-9492-6a7d3848eac3-utilities\") pod \"certified-operators-xgbrj\" (UID: \"04f85ab1-59e1-486f-9492-6a7d3848eac3\") " pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.795966 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04f85ab1-59e1-486f-9492-6a7d3848eac3-catalog-content\") pod \"certified-operators-xgbrj\" (UID: \"04f85ab1-59e1-486f-9492-6a7d3848eac3\") " pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.898257 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04f85ab1-59e1-486f-9492-6a7d3848eac3-utilities\") pod \"certified-operators-xgbrj\" (UID: \"04f85ab1-59e1-486f-9492-6a7d3848eac3\") " pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.898372 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04f85ab1-59e1-486f-9492-6a7d3848eac3-catalog-content\") pod \"certified-operators-xgbrj\" (UID: \"04f85ab1-59e1-486f-9492-6a7d3848eac3\") " pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.898437 4682 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55pvf\" (UniqueName: \"kubernetes.io/projected/04f85ab1-59e1-486f-9492-6a7d3848eac3-kube-api-access-55pvf\") pod \"certified-operators-xgbrj\" (UID: \"04f85ab1-59e1-486f-9492-6a7d3848eac3\") " pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.899255 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04f85ab1-59e1-486f-9492-6a7d3848eac3-utilities\") pod \"certified-operators-xgbrj\" (UID: \"04f85ab1-59e1-486f-9492-6a7d3848eac3\") " pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.899500 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04f85ab1-59e1-486f-9492-6a7d3848eac3-catalog-content\") pod \"certified-operators-xgbrj\" (UID: \"04f85ab1-59e1-486f-9492-6a7d3848eac3\") " pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:30 crc kubenswrapper[4682]: I1210 11:11:30.950555 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55pvf\" (UniqueName: \"kubernetes.io/projected/04f85ab1-59e1-486f-9492-6a7d3848eac3-kube-api-access-55pvf\") pod \"certified-operators-xgbrj\" (UID: \"04f85ab1-59e1-486f-9492-6a7d3848eac3\") " pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:31 crc kubenswrapper[4682]: I1210 11:11:31.034964 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:31 crc kubenswrapper[4682]: I1210 11:11:31.233051 4682 scope.go:117] "RemoveContainer" containerID="d334adfb35be27b46279f9611f45e3a210c4fdbf44ceb555dadc28eb89ea99ae" Dec 10 11:11:31 crc kubenswrapper[4682]: I1210 11:11:31.531215 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xgbrj"] Dec 10 11:11:32 crc kubenswrapper[4682]: I1210 11:11:32.337607 4682 generic.go:334] "Generic (PLEG): container finished" podID="04f85ab1-59e1-486f-9492-6a7d3848eac3" containerID="c32c88db2e7b3eac2b3fa319cb8a08225229b72d24300a9b1551b0c89472e1e3" exitCode=0 Dec 10 11:11:32 crc kubenswrapper[4682]: I1210 11:11:32.337664 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xgbrj" event={"ID":"04f85ab1-59e1-486f-9492-6a7d3848eac3","Type":"ContainerDied","Data":"c32c88db2e7b3eac2b3fa319cb8a08225229b72d24300a9b1551b0c89472e1e3"} Dec 10 11:11:32 crc kubenswrapper[4682]: I1210 11:11:32.337900 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xgbrj" event={"ID":"04f85ab1-59e1-486f-9492-6a7d3848eac3","Type":"ContainerStarted","Data":"0d51b6e6ed16939b40bd6e2812e8eaf002b114227c89ff77e09c530d3724bd0a"} Dec 10 11:11:33 crc kubenswrapper[4682]: I1210 11:11:33.362964 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xgbrj" event={"ID":"04f85ab1-59e1-486f-9492-6a7d3848eac3","Type":"ContainerStarted","Data":"d0abb2de5cd69705ac575d0aa437687a8ecfe7b0faa12d3c06438402c5b6177f"} Dec 10 11:11:34 crc kubenswrapper[4682]: I1210 11:11:34.375609 4682 generic.go:334] "Generic (PLEG): container finished" podID="04f85ab1-59e1-486f-9492-6a7d3848eac3" containerID="d0abb2de5cd69705ac575d0aa437687a8ecfe7b0faa12d3c06438402c5b6177f" exitCode=0 Dec 10 11:11:34 crc 
kubenswrapper[4682]: I1210 11:11:34.375712 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xgbrj" event={"ID":"04f85ab1-59e1-486f-9492-6a7d3848eac3","Type":"ContainerDied","Data":"d0abb2de5cd69705ac575d0aa437687a8ecfe7b0faa12d3c06438402c5b6177f"} Dec 10 11:11:34 crc kubenswrapper[4682]: I1210 11:11:34.411362 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 10 11:11:34 crc kubenswrapper[4682]: E1210 11:11:34.503454 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:11:34 crc kubenswrapper[4682]: E1210 11:11:34.503591 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:11:34 crc kubenswrapper[4682]: E1210 11:11:34.503711 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:11:34 crc kubenswrapper[4682]: E1210 11:11:34.504950 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:11:35 crc kubenswrapper[4682]: I1210 11:11:35.399564 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xgbrj" event={"ID":"04f85ab1-59e1-486f-9492-6a7d3848eac3","Type":"ContainerStarted","Data":"28bcac832a231f3b26476a6bcab6b14c7f0f05b2344e2b035b78ed9fc94fe46a"} Dec 10 11:11:35 crc kubenswrapper[4682]: E1210 11:11:35.402161 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:11:35 crc kubenswrapper[4682]: I1210 11:11:35.428786 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xgbrj" podStartSLOduration=2.8872004159999998 podStartE2EDuration="5.42876417s" podCreationTimestamp="2025-12-10 11:11:30 +0000 UTC" firstStartedPulling="2025-12-10 11:11:32.339846276 +0000 UTC m=+1572.660057026" lastFinishedPulling="2025-12-10 11:11:34.88141003 +0000 UTC m=+1575.201620780" observedRunningTime="2025-12-10 11:11:35.419164564 +0000 UTC m=+1575.739375314" watchObservedRunningTime="2025-12-10 11:11:35.42876417 +0000 UTC m=+1575.748974940" Dec 10 11:11:36 crc kubenswrapper[4682]: I1210 11:11:36.478380 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection 
refused" start-of-body= Dec 10 11:11:36 crc kubenswrapper[4682]: I1210 11:11:36.478701 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:11:37 crc kubenswrapper[4682]: I1210 11:11:37.831689 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:37 crc kubenswrapper[4682]: I1210 11:11:37.917938 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78468d7767-rx5lf"] Dec 10 11:11:37 crc kubenswrapper[4682]: I1210 11:11:37.918220 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-78468d7767-rx5lf" podUID="a64a7fde-65b5-4376-ac93-deb06f0ceb93" containerName="dnsmasq-dns" containerID="cri-o://a151ea25ced91cbe769907f17c8f42050d7782476cba4e2bea6c2bf5dcebad46" gracePeriod=10 Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.099696 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5475ccd585-x8798"] Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.102251 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.124789 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5475ccd585-x8798"] Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.257206 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-dns-swift-storage-0\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.257318 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lkk2\" (UniqueName: \"kubernetes.io/projected/33cd4736-a475-41db-acb5-28015f2cf6a0-kube-api-access-7lkk2\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.257423 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-openstack-edpm-ipam\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.257462 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-ovsdbserver-nb\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.257556 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-ovsdbserver-sb\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.257586 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-dns-svc\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.257644 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-config\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.359067 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-ovsdbserver-sb\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.359111 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-dns-svc\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.359160 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-config\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.359190 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-dns-swift-storage-0\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.359249 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lkk2\" (UniqueName: \"kubernetes.io/projected/33cd4736-a475-41db-acb5-28015f2cf6a0-kube-api-access-7lkk2\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.359321 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-openstack-edpm-ipam\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.359350 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-ovsdbserver-nb\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.360294 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-ovsdbserver-nb\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.360851 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-ovsdbserver-sb\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.361325 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-dns-svc\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.361953 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-config\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.362378 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-dns-swift-storage-0\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.362602 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/33cd4736-a475-41db-acb5-28015f2cf6a0-openstack-edpm-ipam\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.379235 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lkk2\" (UniqueName: \"kubernetes.io/projected/33cd4736-a475-41db-acb5-28015f2cf6a0-kube-api-access-7lkk2\") pod \"dnsmasq-dns-5475ccd585-x8798\" (UID: \"33cd4736-a475-41db-acb5-28015f2cf6a0\") " pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: E1210 11:11:38.383977 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.430746 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.431072 4682 generic.go:334] "Generic (PLEG): container finished" podID="a64a7fde-65b5-4376-ac93-deb06f0ceb93" containerID="a151ea25ced91cbe769907f17c8f42050d7782476cba4e2bea6c2bf5dcebad46" exitCode=0 Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.431114 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78468d7767-rx5lf" event={"ID":"a64a7fde-65b5-4376-ac93-deb06f0ceb93","Type":"ContainerDied","Data":"a151ea25ced91cbe769907f17c8f42050d7782476cba4e2bea6c2bf5dcebad46"} Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.586650 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.670236 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-ovsdbserver-sb\") pod \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.670320 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-config\") pod \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.670385 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-ovsdbserver-nb\") pod \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.670419 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-dns-swift-storage-0\") pod \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.670512 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4tsd\" (UniqueName: \"kubernetes.io/projected/a64a7fde-65b5-4376-ac93-deb06f0ceb93-kube-api-access-m4tsd\") pod \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.670579 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-dns-svc\") pod \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\" (UID: \"a64a7fde-65b5-4376-ac93-deb06f0ceb93\") " Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.706077 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a64a7fde-65b5-4376-ac93-deb06f0ceb93-kube-api-access-m4tsd" (OuterVolumeSpecName: "kube-api-access-m4tsd") pod "a64a7fde-65b5-4376-ac93-deb06f0ceb93" (UID: "a64a7fde-65b5-4376-ac93-deb06f0ceb93"). InnerVolumeSpecName "kube-api-access-m4tsd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.761952 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a64a7fde-65b5-4376-ac93-deb06f0ceb93" (UID: "a64a7fde-65b5-4376-ac93-deb06f0ceb93"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.763680 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a64a7fde-65b5-4376-ac93-deb06f0ceb93" (UID: "a64a7fde-65b5-4376-ac93-deb06f0ceb93"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.765315 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-config" (OuterVolumeSpecName: "config") pod "a64a7fde-65b5-4376-ac93-deb06f0ceb93" (UID: "a64a7fde-65b5-4376-ac93-deb06f0ceb93"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.773813 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4tsd\" (UniqueName: \"kubernetes.io/projected/a64a7fde-65b5-4376-ac93-deb06f0ceb93-kube-api-access-m4tsd\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.773858 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.773868 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.773876 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.787110 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a64a7fde-65b5-4376-ac93-deb06f0ceb93" (UID: "a64a7fde-65b5-4376-ac93-deb06f0ceb93"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.800295 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a64a7fde-65b5-4376-ac93-deb06f0ceb93" (UID: "a64a7fde-65b5-4376-ac93-deb06f0ceb93"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.875468 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.876897 4682 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a64a7fde-65b5-4376-ac93-deb06f0ceb93-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:38 crc kubenswrapper[4682]: W1210 11:11:38.919973 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33cd4736_a475_41db_acb5_28015f2cf6a0.slice/crio-0d46fd2e0a18565f7013e9b3f44c2ce6497701e9ce8574ce665c13dc0daab7ed WatchSource:0}: Error finding container 0d46fd2e0a18565f7013e9b3f44c2ce6497701e9ce8574ce665c13dc0daab7ed: Status 404 returned error can't find the container with id 0d46fd2e0a18565f7013e9b3f44c2ce6497701e9ce8574ce665c13dc0daab7ed Dec 10 11:11:38 crc kubenswrapper[4682]: I1210 11:11:38.920438 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5475ccd585-x8798"] Dec 10 11:11:39 crc kubenswrapper[4682]: I1210 11:11:39.448155 4682 generic.go:334] "Generic (PLEG): container finished" podID="33cd4736-a475-41db-acb5-28015f2cf6a0" containerID="b0cd0043fffb0121bdd3ca2dba1cfdf7c443141fd8e08ed954f288a5208c65bb" exitCode=0 Dec 10 11:11:39 crc kubenswrapper[4682]: I1210 11:11:39.448248 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5475ccd585-x8798" event={"ID":"33cd4736-a475-41db-acb5-28015f2cf6a0","Type":"ContainerDied","Data":"b0cd0043fffb0121bdd3ca2dba1cfdf7c443141fd8e08ed954f288a5208c65bb"} Dec 10 11:11:39 crc kubenswrapper[4682]: I1210 11:11:39.448742 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5475ccd585-x8798" event={"ID":"33cd4736-a475-41db-acb5-28015f2cf6a0","Type":"ContainerStarted","Data":"0d46fd2e0a18565f7013e9b3f44c2ce6497701e9ce8574ce665c13dc0daab7ed"} Dec 10 11:11:39 crc kubenswrapper[4682]: I1210 11:11:39.472693 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78468d7767-rx5lf" event={"ID":"a64a7fde-65b5-4376-ac93-deb06f0ceb93","Type":"ContainerDied","Data":"517d22be77f5bd75b7377a82364799557a1a19cfedcc0356e41caa013b6ea254"} Dec 10 11:11:39 crc kubenswrapper[4682]: I1210 11:11:39.472751 4682 scope.go:117] "RemoveContainer" containerID="a151ea25ced91cbe769907f17c8f42050d7782476cba4e2bea6c2bf5dcebad46" Dec 10 11:11:39 crc kubenswrapper[4682]: I1210 11:11:39.472960 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78468d7767-rx5lf" Dec 10 11:11:39 crc kubenswrapper[4682]: I1210 11:11:39.656308 4682 scope.go:117] "RemoveContainer" containerID="dfc36880e5be5501910b9050a9834872ce0210072959c36c1ea94a040bf2b50f" Dec 10 11:11:39 crc kubenswrapper[4682]: I1210 11:11:39.682105 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78468d7767-rx5lf"] Dec 10 11:11:39 crc kubenswrapper[4682]: I1210 11:11:39.702425 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78468d7767-rx5lf"] Dec 10 11:11:40 crc kubenswrapper[4682]: I1210 11:11:40.391615 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a64a7fde-65b5-4376-ac93-deb06f0ceb93" path="/var/lib/kubelet/pods/a64a7fde-65b5-4376-ac93-deb06f0ceb93/volumes" Dec 10 11:11:40 crc kubenswrapper[4682]: I1210 11:11:40.483775 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5475ccd585-x8798" event={"ID":"33cd4736-a475-41db-acb5-28015f2cf6a0","Type":"ContainerStarted","Data":"d7b6ac4d53b4145c4d45faf910ec43f6eaa2119f724edcf1f7df7b61e1cce85c"} Dec 10 11:11:40 crc kubenswrapper[4682]: I1210 11:11:40.483914 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:40 crc kubenswrapper[4682]: I1210 11:11:40.502901 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5475ccd585-x8798" podStartSLOduration=2.502876229 podStartE2EDuration="2.502876229s" podCreationTimestamp="2025-12-10 11:11:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:11:40.500916009 +0000 UTC m=+1580.821126789" watchObservedRunningTime="2025-12-10 11:11:40.502876229 +0000 UTC m=+1580.823086989" Dec 10 11:11:41 crc kubenswrapper[4682]: I1210 11:11:41.035345 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:41 crc kubenswrapper[4682]: I1210 11:11:41.035390 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:41 crc kubenswrapper[4682]: I1210 11:11:41.080899 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:41 crc kubenswrapper[4682]: I1210 11:11:41.538231 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:41 crc kubenswrapper[4682]: I1210 11:11:41.592821 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xgbrj"] Dec 10 11:11:43 crc kubenswrapper[4682]: I1210 11:11:43.511924 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xgbrj" podUID="04f85ab1-59e1-486f-9492-6a7d3848eac3" containerName="registry-server" containerID="cri-o://28bcac832a231f3b26476a6bcab6b14c7f0f05b2344e2b035b78ed9fc94fe46a" gracePeriod=2 Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.170642 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.290548 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04f85ab1-59e1-486f-9492-6a7d3848eac3-catalog-content\") pod \"04f85ab1-59e1-486f-9492-6a7d3848eac3\" (UID: \"04f85ab1-59e1-486f-9492-6a7d3848eac3\") " Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.290990 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04f85ab1-59e1-486f-9492-6a7d3848eac3-utilities\") pod \"04f85ab1-59e1-486f-9492-6a7d3848eac3\" (UID: \"04f85ab1-59e1-486f-9492-6a7d3848eac3\") " Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.291022 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55pvf\" (UniqueName: \"kubernetes.io/projected/04f85ab1-59e1-486f-9492-6a7d3848eac3-kube-api-access-55pvf\") pod \"04f85ab1-59e1-486f-9492-6a7d3848eac3\" (UID: \"04f85ab1-59e1-486f-9492-6a7d3848eac3\") " Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.291882 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04f85ab1-59e1-486f-9492-6a7d3848eac3-utilities" (OuterVolumeSpecName: "utilities") pod "04f85ab1-59e1-486f-9492-6a7d3848eac3" (UID: "04f85ab1-59e1-486f-9492-6a7d3848eac3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.298009 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04f85ab1-59e1-486f-9492-6a7d3848eac3-kube-api-access-55pvf" (OuterVolumeSpecName: "kube-api-access-55pvf") pod "04f85ab1-59e1-486f-9492-6a7d3848eac3" (UID: "04f85ab1-59e1-486f-9492-6a7d3848eac3"). InnerVolumeSpecName "kube-api-access-55pvf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.350845 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04f85ab1-59e1-486f-9492-6a7d3848eac3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "04f85ab1-59e1-486f-9492-6a7d3848eac3" (UID: "04f85ab1-59e1-486f-9492-6a7d3848eac3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.393920 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04f85ab1-59e1-486f-9492-6a7d3848eac3-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.393950 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04f85ab1-59e1-486f-9492-6a7d3848eac3-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.393959 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55pvf\" (UniqueName: \"kubernetes.io/projected/04f85ab1-59e1-486f-9492-6a7d3848eac3-kube-api-access-55pvf\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.523546 4682 generic.go:334] "Generic (PLEG): container finished" podID="04f85ab1-59e1-486f-9492-6a7d3848eac3" containerID="28bcac832a231f3b26476a6bcab6b14c7f0f05b2344e2b035b78ed9fc94fe46a" exitCode=0 Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.523605 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xgbrj" event={"ID":"04f85ab1-59e1-486f-9492-6a7d3848eac3","Type":"ContainerDied","Data":"28bcac832a231f3b26476a6bcab6b14c7f0f05b2344e2b035b78ed9fc94fe46a"} Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.523621 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xgbrj" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.523645 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xgbrj" event={"ID":"04f85ab1-59e1-486f-9492-6a7d3848eac3","Type":"ContainerDied","Data":"0d51b6e6ed16939b40bd6e2812e8eaf002b114227c89ff77e09c530d3724bd0a"} Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.523667 4682 scope.go:117] "RemoveContainer" containerID="28bcac832a231f3b26476a6bcab6b14c7f0f05b2344e2b035b78ed9fc94fe46a" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.558948 4682 scope.go:117] "RemoveContainer" containerID="d0abb2de5cd69705ac575d0aa437687a8ecfe7b0faa12d3c06438402c5b6177f" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.561176 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xgbrj"] Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.576252 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xgbrj"] Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.588874 4682 scope.go:117] "RemoveContainer" containerID="c32c88db2e7b3eac2b3fa319cb8a08225229b72d24300a9b1551b0c89472e1e3" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.631841 4682 scope.go:117] "RemoveContainer" containerID="28bcac832a231f3b26476a6bcab6b14c7f0f05b2344e2b035b78ed9fc94fe46a" Dec 10 11:11:44 crc kubenswrapper[4682]: E1210 11:11:44.632319 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28bcac832a231f3b26476a6bcab6b14c7f0f05b2344e2b035b78ed9fc94fe46a\": container with ID starting with 28bcac832a231f3b26476a6bcab6b14c7f0f05b2344e2b035b78ed9fc94fe46a not found: ID does not exist" containerID="28bcac832a231f3b26476a6bcab6b14c7f0f05b2344e2b035b78ed9fc94fe46a" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.632354 
4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28bcac832a231f3b26476a6bcab6b14c7f0f05b2344e2b035b78ed9fc94fe46a"} err="failed to get container status \"28bcac832a231f3b26476a6bcab6b14c7f0f05b2344e2b035b78ed9fc94fe46a\": rpc error: code = NotFound desc = could not find container \"28bcac832a231f3b26476a6bcab6b14c7f0f05b2344e2b035b78ed9fc94fe46a\": container with ID starting with 28bcac832a231f3b26476a6bcab6b14c7f0f05b2344e2b035b78ed9fc94fe46a not found: ID does not exist" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.632376 4682 scope.go:117] "RemoveContainer" containerID="d0abb2de5cd69705ac575d0aa437687a8ecfe7b0faa12d3c06438402c5b6177f" Dec 10 11:11:44 crc kubenswrapper[4682]: E1210 11:11:44.632759 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0abb2de5cd69705ac575d0aa437687a8ecfe7b0faa12d3c06438402c5b6177f\": container with ID starting with d0abb2de5cd69705ac575d0aa437687a8ecfe7b0faa12d3c06438402c5b6177f not found: ID does not exist" containerID="d0abb2de5cd69705ac575d0aa437687a8ecfe7b0faa12d3c06438402c5b6177f" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.632793 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0abb2de5cd69705ac575d0aa437687a8ecfe7b0faa12d3c06438402c5b6177f"} err="failed to get container status \"d0abb2de5cd69705ac575d0aa437687a8ecfe7b0faa12d3c06438402c5b6177f\": rpc error: code = NotFound desc = could not find container \"d0abb2de5cd69705ac575d0aa437687a8ecfe7b0faa12d3c06438402c5b6177f\": container with ID starting with d0abb2de5cd69705ac575d0aa437687a8ecfe7b0faa12d3c06438402c5b6177f not found: ID does not exist" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.632813 4682 scope.go:117] "RemoveContainer" containerID="c32c88db2e7b3eac2b3fa319cb8a08225229b72d24300a9b1551b0c89472e1e3" Dec 10 11:11:44 crc kubenswrapper[4682]: E1210 11:11:44.633101 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c32c88db2e7b3eac2b3fa319cb8a08225229b72d24300a9b1551b0c89472e1e3\": container with ID starting with c32c88db2e7b3eac2b3fa319cb8a08225229b72d24300a9b1551b0c89472e1e3 not found: ID does not exist" containerID="c32c88db2e7b3eac2b3fa319cb8a08225229b72d24300a9b1551b0c89472e1e3" Dec 10 11:11:44 crc kubenswrapper[4682]: I1210 11:11:44.633131 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c32c88db2e7b3eac2b3fa319cb8a08225229b72d24300a9b1551b0c89472e1e3"} err="failed to get container status \"c32c88db2e7b3eac2b3fa319cb8a08225229b72d24300a9b1551b0c89472e1e3\": rpc error: code = NotFound desc = could not find container \"c32c88db2e7b3eac2b3fa319cb8a08225229b72d24300a9b1551b0c89472e1e3\": container with ID starting with c32c88db2e7b3eac2b3fa319cb8a08225229b72d24300a9b1551b0c89472e1e3 not found: ID does not exist" Dec 10 11:11:46 crc kubenswrapper[4682]: I1210 11:11:46.414149 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04f85ab1-59e1-486f-9492-6a7d3848eac3" path="/var/lib/kubelet/pods/04f85ab1-59e1-486f-9492-6a7d3848eac3/volumes" Dec 10 11:11:48 crc kubenswrapper[4682]: E1210 11:11:48.384627 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:11:48 crc kubenswrapper[4682]: I1210 11:11:48.433099 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5475ccd585-x8798" Dec 10 11:11:48 crc kubenswrapper[4682]: I1210 11:11:48.497712 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-595979776c-jlkf8"] Dec 10 11:11:48 crc kubenswrapper[4682]: I1210 11:11:48.497963 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-595979776c-jlkf8" podUID="528a3098-5c3b-436e-80a9-319169913494" containerName="dnsmasq-dns" containerID="cri-o://3e5a925659cd626b7659b4d99d29fc4e07b4c2d9585fad4802cbd6b06ecaeca4" gracePeriod=10 Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.141325 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.288832 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-dns-swift-storage-0\") pod \"528a3098-5c3b-436e-80a9-319169913494\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.288885 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bh69m\" (UniqueName: \"kubernetes.io/projected/528a3098-5c3b-436e-80a9-319169913494-kube-api-access-bh69m\") pod \"528a3098-5c3b-436e-80a9-319169913494\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.288910 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-openstack-edpm-ipam\") pod \"528a3098-5c3b-436e-80a9-319169913494\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.288956 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-ovsdbserver-sb\") pod \"528a3098-5c3b-436e-80a9-319169913494\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.289003 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-dns-svc\") pod \"528a3098-5c3b-436e-80a9-319169913494\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.289103 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-config\") pod \"528a3098-5c3b-436e-80a9-319169913494\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.289181 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-ovsdbserver-nb\") pod \"528a3098-5c3b-436e-80a9-319169913494\" (UID: \"528a3098-5c3b-436e-80a9-319169913494\") " 
Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.294843 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/528a3098-5c3b-436e-80a9-319169913494-kube-api-access-bh69m" (OuterVolumeSpecName: "kube-api-access-bh69m") pod "528a3098-5c3b-436e-80a9-319169913494" (UID: "528a3098-5c3b-436e-80a9-319169913494"). InnerVolumeSpecName "kube-api-access-bh69m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.345826 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "528a3098-5c3b-436e-80a9-319169913494" (UID: "528a3098-5c3b-436e-80a9-319169913494"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.351190 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "528a3098-5c3b-436e-80a9-319169913494" (UID: "528a3098-5c3b-436e-80a9-319169913494"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.351200 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "528a3098-5c3b-436e-80a9-319169913494" (UID: "528a3098-5c3b-436e-80a9-319169913494"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.357553 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "528a3098-5c3b-436e-80a9-319169913494" (UID: "528a3098-5c3b-436e-80a9-319169913494"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.358217 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "528a3098-5c3b-436e-80a9-319169913494" (UID: "528a3098-5c3b-436e-80a9-319169913494"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.365530 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-config" (OuterVolumeSpecName: "config") pod "528a3098-5c3b-436e-80a9-319169913494" (UID: "528a3098-5c3b-436e-80a9-319169913494"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.392050 4682 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.393653 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bh69m\" (UniqueName: \"kubernetes.io/projected/528a3098-5c3b-436e-80a9-319169913494-kube-api-access-bh69m\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.393738 4682 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.393818 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.393899 4682 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.394899 4682 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-config\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.394974 4682 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/528a3098-5c3b-436e-80a9-319169913494-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 11:11:49 crc kubenswrapper[4682]: E1210 11:11:49.518249 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:11:49 crc kubenswrapper[4682]: E1210 11:11:49.518392 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:11:49 crc kubenswrapper[4682]: E1210 11:11:49.518606 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:11:49 crc kubenswrapper[4682]: E1210 11:11:49.519849 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.582044 4682 generic.go:334] "Generic (PLEG): container finished" podID="528a3098-5c3b-436e-80a9-319169913494" containerID="3e5a925659cd626b7659b4d99d29fc4e07b4c2d9585fad4802cbd6b06ecaeca4" exitCode=0 Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.582128 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-595979776c-jlkf8" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.582129 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-595979776c-jlkf8" event={"ID":"528a3098-5c3b-436e-80a9-319169913494","Type":"ContainerDied","Data":"3e5a925659cd626b7659b4d99d29fc4e07b4c2d9585fad4802cbd6b06ecaeca4"} Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.582602 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-595979776c-jlkf8" event={"ID":"528a3098-5c3b-436e-80a9-319169913494","Type":"ContainerDied","Data":"db5e6454b861460f684cf4ba9f566161eda7f644babc1e2385e30954d2d5d1ba"} Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.582626 4682 scope.go:117] "RemoveContainer" containerID="3e5a925659cd626b7659b4d99d29fc4e07b4c2d9585fad4802cbd6b06ecaeca4" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.607298 4682 scope.go:117] "RemoveContainer" containerID="36ff4f0d42e829c5c324e042616a2185b903c3795f9a76fb93684def0e1c7f26" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.630307 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-595979776c-jlkf8"] Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.647030 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-595979776c-jlkf8"] Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.666500 4682 scope.go:117] "RemoveContainer" containerID="3e5a925659cd626b7659b4d99d29fc4e07b4c2d9585fad4802cbd6b06ecaeca4" Dec 10 11:11:49 crc kubenswrapper[4682]: E1210 11:11:49.666982 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e5a925659cd626b7659b4d99d29fc4e07b4c2d9585fad4802cbd6b06ecaeca4\": container with ID starting with 3e5a925659cd626b7659b4d99d29fc4e07b4c2d9585fad4802cbd6b06ecaeca4 not found: ID does not exist" containerID="3e5a925659cd626b7659b4d99d29fc4e07b4c2d9585fad4802cbd6b06ecaeca4" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.667034 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e5a925659cd626b7659b4d99d29fc4e07b4c2d9585fad4802cbd6b06ecaeca4"} err="failed to get container status \"3e5a925659cd626b7659b4d99d29fc4e07b4c2d9585fad4802cbd6b06ecaeca4\": rpc error: code = NotFound desc = could not find container \"3e5a925659cd626b7659b4d99d29fc4e07b4c2d9585fad4802cbd6b06ecaeca4\": container with ID starting with 3e5a925659cd626b7659b4d99d29fc4e07b4c2d9585fad4802cbd6b06ecaeca4 not found: ID does not exist" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.667067 4682 scope.go:117] "RemoveContainer" containerID="36ff4f0d42e829c5c324e042616a2185b903c3795f9a76fb93684def0e1c7f26" Dec 10 11:11:49 crc kubenswrapper[4682]: E1210 11:11:49.667372 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36ff4f0d42e829c5c324e042616a2185b903c3795f9a76fb93684def0e1c7f26\": 
container with ID starting with 36ff4f0d42e829c5c324e042616a2185b903c3795f9a76fb93684def0e1c7f26 not found: ID does not exist" containerID="36ff4f0d42e829c5c324e042616a2185b903c3795f9a76fb93684def0e1c7f26" Dec 10 11:11:49 crc kubenswrapper[4682]: I1210 11:11:49.667486 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36ff4f0d42e829c5c324e042616a2185b903c3795f9a76fb93684def0e1c7f26"} err="failed to get container status \"36ff4f0d42e829c5c324e042616a2185b903c3795f9a76fb93684def0e1c7f26\": rpc error: code = NotFound desc = could not find container \"36ff4f0d42e829c5c324e042616a2185b903c3795f9a76fb93684def0e1c7f26\": container with ID starting with 36ff4f0d42e829c5c324e042616a2185b903c3795f9a76fb93684def0e1c7f26 not found: ID does not exist" Dec 10 11:11:50 crc kubenswrapper[4682]: I1210 11:11:50.391617 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="528a3098-5c3b-436e-80a9-319169913494" path="/var/lib/kubelet/pods/528a3098-5c3b-436e-80a9-319169913494/volumes" Dec 10 11:11:59 crc kubenswrapper[4682]: I1210 11:11:59.703836 4682 generic.go:334] "Generic (PLEG): container finished" podID="ce19556c-31cc-4e0a-b092-c5cfb2cf815a" containerID="d2229b77e0da9da5c22d3309bfabfdb12cd939789521796d8a8aa7fb99b401eb" exitCode=0 Dec 10 11:11:59 crc kubenswrapper[4682]: I1210 11:11:59.703914 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ce19556c-31cc-4e0a-b092-c5cfb2cf815a","Type":"ContainerDied","Data":"d2229b77e0da9da5c22d3309bfabfdb12cd939789521796d8a8aa7fb99b401eb"} Dec 10 11:11:59 crc kubenswrapper[4682]: I1210 11:11:59.705998 4682 generic.go:334] "Generic (PLEG): container finished" podID="b2bb3f39-3fa9-42c1-abea-06fd2630a819" containerID="27be6649e4a0ecfbac5fc2b5d07d378af1c62f352ac5a42260c1fc25b156effb" exitCode=0 Dec 10 11:11:59 crc kubenswrapper[4682]: I1210 11:11:59.706024 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b2bb3f39-3fa9-42c1-abea-06fd2630a819","Type":"ContainerDied","Data":"27be6649e4a0ecfbac5fc2b5d07d378af1c62f352ac5a42260c1fc25b156effb"} Dec 10 11:12:00 crc kubenswrapper[4682]: I1210 11:12:00.824930 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"ce19556c-31cc-4e0a-b092-c5cfb2cf815a","Type":"ContainerStarted","Data":"0a51316f5c9ab01808ea8421eeabb60ddb797754f125a60a8225f891ff3dc661"} Dec 10 11:12:00 crc kubenswrapper[4682]: I1210 11:12:00.826648 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 10 11:12:00 crc kubenswrapper[4682]: I1210 11:12:00.843140 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b2bb3f39-3fa9-42c1-abea-06fd2630a819","Type":"ContainerStarted","Data":"133f9a6d0f0cc9acf48bc66f392c4600042d7c34779876a45e696e4bfc10c74e"} Dec 10 11:12:00 crc kubenswrapper[4682]: I1210 11:12:00.843579 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:12:00 crc kubenswrapper[4682]: I1210 11:12:00.872417 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.87239194 podStartE2EDuration="36.87239194s" podCreationTimestamp="2025-12-10 11:11:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 
11:12:00.853687145 +0000 UTC m=+1601.173897895" watchObservedRunningTime="2025-12-10 11:12:00.87239194 +0000 UTC m=+1601.192602700" Dec 10 11:12:00 crc kubenswrapper[4682]: I1210 11:12:00.884868 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.884814642 podStartE2EDuration="36.884814642s" podCreationTimestamp="2025-12-10 11:11:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 11:12:00.88378112 +0000 UTC m=+1601.203991880" watchObservedRunningTime="2025-12-10 11:12:00.884814642 +0000 UTC m=+1601.205025392" Dec 10 11:12:01 crc kubenswrapper[4682]: E1210 11:12:01.383369 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.720551 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln"] Dec 10 11:12:01 crc kubenswrapper[4682]: E1210 11:12:01.721157 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04f85ab1-59e1-486f-9492-6a7d3848eac3" containerName="extract-content" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.721175 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="04f85ab1-59e1-486f-9492-6a7d3848eac3" containerName="extract-content" Dec 10 11:12:01 crc kubenswrapper[4682]: E1210 11:12:01.721232 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04f85ab1-59e1-486f-9492-6a7d3848eac3" containerName="registry-server" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.721239 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="04f85ab1-59e1-486f-9492-6a7d3848eac3" containerName="registry-server" Dec 10 11:12:01 crc kubenswrapper[4682]: E1210 11:12:01.721248 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a64a7fde-65b5-4376-ac93-deb06f0ceb93" containerName="dnsmasq-dns" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.721255 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="a64a7fde-65b5-4376-ac93-deb06f0ceb93" containerName="dnsmasq-dns" Dec 10 11:12:01 crc kubenswrapper[4682]: E1210 11:12:01.721283 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a64a7fde-65b5-4376-ac93-deb06f0ceb93" containerName="init" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.721290 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="a64a7fde-65b5-4376-ac93-deb06f0ceb93" containerName="init" Dec 10 11:12:01 crc kubenswrapper[4682]: E1210 11:12:01.721310 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="528a3098-5c3b-436e-80a9-319169913494" containerName="dnsmasq-dns" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.721316 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="528a3098-5c3b-436e-80a9-319169913494" containerName="dnsmasq-dns" Dec 10 11:12:01 crc kubenswrapper[4682]: E1210 11:12:01.721323 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04f85ab1-59e1-486f-9492-6a7d3848eac3" containerName="extract-utilities" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.721330 4682 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="04f85ab1-59e1-486f-9492-6a7d3848eac3" containerName="extract-utilities" Dec 10 11:12:01 crc kubenswrapper[4682]: E1210 11:12:01.721340 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="528a3098-5c3b-436e-80a9-319169913494" containerName="init" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.721346 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="528a3098-5c3b-436e-80a9-319169913494" containerName="init" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.721599 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="528a3098-5c3b-436e-80a9-319169913494" containerName="dnsmasq-dns" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.721622 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="a64a7fde-65b5-4376-ac93-deb06f0ceb93" containerName="dnsmasq-dns" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.721637 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="04f85ab1-59e1-486f-9492-6a7d3848eac3" containerName="registry-server" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.725873 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.729344 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.729550 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.729711 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-tln2g" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.729825 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.738300 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln"] Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.917928 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.918752 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.919491 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46mvq\" (UniqueName: \"kubernetes.io/projected/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-kube-api-access-46mvq\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " 
pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:01 crc kubenswrapper[4682]: I1210 11:12:01.919680 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:02 crc kubenswrapper[4682]: I1210 11:12:02.022271 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:02 crc kubenswrapper[4682]: I1210 11:12:02.022498 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:02 crc kubenswrapper[4682]: I1210 11:12:02.022537 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:02 crc kubenswrapper[4682]: I1210 11:12:02.022646 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46mvq\" (UniqueName: \"kubernetes.io/projected/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-kube-api-access-46mvq\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:02 crc kubenswrapper[4682]: I1210 11:12:02.028027 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:02 crc kubenswrapper[4682]: I1210 11:12:02.029255 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:02 crc kubenswrapper[4682]: I1210 11:12:02.031918 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 
10 11:12:02 crc kubenswrapper[4682]: I1210 11:12:02.041056 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46mvq\" (UniqueName: \"kubernetes.io/projected/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-kube-api-access-46mvq\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:02 crc kubenswrapper[4682]: I1210 11:12:02.047812 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:02 crc kubenswrapper[4682]: E1210 11:12:02.532371 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:12:02 crc kubenswrapper[4682]: E1210 11:12:02.532666 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:12:02 crc kubenswrapper[4682]: E1210 11:12:02.532822 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:12:02 crc kubenswrapper[4682]: E1210 11:12:02.534114 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:12:02 crc kubenswrapper[4682]: I1210 11:12:02.653529 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln"] Dec 10 11:12:02 crc kubenswrapper[4682]: I1210 11:12:02.864432 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" event={"ID":"435fb604-dad7-4d75-bb61-2e4ccf57d2b3","Type":"ContainerStarted","Data":"99313c544af2167cd0f87f2f814664446031d668e8ab444509af07248ce25161"} Dec 10 11:12:06 crc kubenswrapper[4682]: I1210 11:12:06.478418 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:12:06 crc kubenswrapper[4682]: I1210 11:12:06.478964 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:12:06 crc kubenswrapper[4682]: I1210 11:12:06.479016 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 11:12:06 crc kubenswrapper[4682]: I1210 11:12:06.479895 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:12:06 crc kubenswrapper[4682]: I1210 11:12:06.479962 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" gracePeriod=600 Dec 10 11:12:06 crc kubenswrapper[4682]: E1210 11:12:06.618111 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:12:06 crc kubenswrapper[4682]: I1210 11:12:06.912233 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" exitCode=0 Dec 10 11:12:06 crc kubenswrapper[4682]: I1210 11:12:06.912304 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f"} Dec 10 11:12:06 crc kubenswrapper[4682]: I1210 11:12:06.912643 4682 scope.go:117] "RemoveContainer" containerID="9d4f095c608a9033903a024629d6bdbb8e05d5ec10f831b06e26d70cfeb1c556" Dec 10 11:12:06 crc kubenswrapper[4682]: I1210 11:12:06.913436 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:12:06 crc kubenswrapper[4682]: E1210 11:12:06.913850 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:12:12 crc kubenswrapper[4682]: E1210 11:12:12.384805 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:12:14 crc kubenswrapper[4682]: I1210 11:12:14.706643 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Dec 10 11:12:14 crc kubenswrapper[4682]: I1210 11:12:14.911641 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Dec 10 11:12:14 crc kubenswrapper[4682]: I1210 11:12:14.997285 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" 
event={"ID":"435fb604-dad7-4d75-bb61-2e4ccf57d2b3","Type":"ContainerStarted","Data":"be5b5ae06218ce4df74ec3f2a2cb93ad248a5ac892afd30c01c7e955314f597b"} Dec 10 11:12:15 crc kubenswrapper[4682]: I1210 11:12:15.017971 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" podStartSLOduration=2.8011171040000002 podStartE2EDuration="14.017951028s" podCreationTimestamp="2025-12-10 11:12:01 +0000 UTC" firstStartedPulling="2025-12-10 11:12:02.657311498 +0000 UTC m=+1602.977522248" lastFinishedPulling="2025-12-10 11:12:13.874145422 +0000 UTC m=+1614.194356172" observedRunningTime="2025-12-10 11:12:15.012705767 +0000 UTC m=+1615.332916517" watchObservedRunningTime="2025-12-10 11:12:15.017951028 +0000 UTC m=+1615.338161778" Dec 10 11:12:17 crc kubenswrapper[4682]: E1210 11:12:17.382709 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:12:19 crc kubenswrapper[4682]: I1210 11:12:19.381063 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:12:19 crc kubenswrapper[4682]: E1210 11:12:19.381644 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:12:23 crc kubenswrapper[4682]: E1210 11:12:23.384300 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:12:26 crc kubenswrapper[4682]: I1210 11:12:26.113526 4682 generic.go:334] "Generic (PLEG): container finished" podID="435fb604-dad7-4d75-bb61-2e4ccf57d2b3" containerID="be5b5ae06218ce4df74ec3f2a2cb93ad248a5ac892afd30c01c7e955314f597b" exitCode=0 Dec 10 11:12:26 crc kubenswrapper[4682]: I1210 11:12:26.113605 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" event={"ID":"435fb604-dad7-4d75-bb61-2e4ccf57d2b3","Type":"ContainerDied","Data":"be5b5ae06218ce4df74ec3f2a2cb93ad248a5ac892afd30c01c7e955314f597b"} Dec 10 11:12:27 crc kubenswrapper[4682]: I1210 11:12:27.614455 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:27 crc kubenswrapper[4682]: I1210 11:12:27.674071 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-repo-setup-combined-ca-bundle\") pod \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " Dec 10 11:12:27 crc kubenswrapper[4682]: I1210 11:12:27.674162 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-ssh-key\") pod \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " Dec 10 11:12:27 crc kubenswrapper[4682]: I1210 11:12:27.674291 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-inventory\") pod \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " Dec 10 11:12:27 crc kubenswrapper[4682]: I1210 11:12:27.674337 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46mvq\" (UniqueName: \"kubernetes.io/projected/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-kube-api-access-46mvq\") pod \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\" (UID: \"435fb604-dad7-4d75-bb61-2e4ccf57d2b3\") " Dec 10 11:12:27 crc kubenswrapper[4682]: I1210 11:12:27.681691 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "435fb604-dad7-4d75-bb61-2e4ccf57d2b3" (UID: "435fb604-dad7-4d75-bb61-2e4ccf57d2b3"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:12:27 crc kubenswrapper[4682]: I1210 11:12:27.685319 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-kube-api-access-46mvq" (OuterVolumeSpecName: "kube-api-access-46mvq") pod "435fb604-dad7-4d75-bb61-2e4ccf57d2b3" (UID: "435fb604-dad7-4d75-bb61-2e4ccf57d2b3"). InnerVolumeSpecName "kube-api-access-46mvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:12:27 crc kubenswrapper[4682]: I1210 11:12:27.710746 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-inventory" (OuterVolumeSpecName: "inventory") pod "435fb604-dad7-4d75-bb61-2e4ccf57d2b3" (UID: "435fb604-dad7-4d75-bb61-2e4ccf57d2b3"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:12:27 crc kubenswrapper[4682]: I1210 11:12:27.726899 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "435fb604-dad7-4d75-bb61-2e4ccf57d2b3" (UID: "435fb604-dad7-4d75-bb61-2e4ccf57d2b3"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:12:27 crc kubenswrapper[4682]: I1210 11:12:27.776554 4682 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:27 crc kubenswrapper[4682]: I1210 11:12:27.776597 4682 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:27 crc kubenswrapper[4682]: I1210 11:12:27.776610 4682 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:27 crc kubenswrapper[4682]: I1210 11:12:27.776621 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46mvq\" (UniqueName: \"kubernetes.io/projected/435fb604-dad7-4d75-bb61-2e4ccf57d2b3-kube-api-access-46mvq\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.138465 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" event={"ID":"435fb604-dad7-4d75-bb61-2e4ccf57d2b3","Type":"ContainerDied","Data":"99313c544af2167cd0f87f2f814664446031d668e8ab444509af07248ce25161"} Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.138524 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99313c544af2167cd0f87f2f814664446031d668e8ab444509af07248ce25161" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.138530 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.230819 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr"] Dec 10 11:12:28 crc kubenswrapper[4682]: E1210 11:12:28.231601 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="435fb604-dad7-4d75-bb61-2e4ccf57d2b3" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.231625 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="435fb604-dad7-4d75-bb61-2e4ccf57d2b3" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.231923 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="435fb604-dad7-4d75-bb61-2e4ccf57d2b3" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.232950 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.235539 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-tln2g" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.235811 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.236448 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.237004 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.241898 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr"] Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.290259 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/667432f0-bad4-4a31-9f30-29daa0e52f73-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dhhsr\" (UID: \"667432f0-bad4-4a31-9f30-29daa0e52f73\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.290342 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/667432f0-bad4-4a31-9f30-29daa0e52f73-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dhhsr\" (UID: \"667432f0-bad4-4a31-9f30-29daa0e52f73\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.290457 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmtxr\" (UniqueName: \"kubernetes.io/projected/667432f0-bad4-4a31-9f30-29daa0e52f73-kube-api-access-lmtxr\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dhhsr\" (UID: \"667432f0-bad4-4a31-9f30-29daa0e52f73\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.392369 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmtxr\" (UniqueName: \"kubernetes.io/projected/667432f0-bad4-4a31-9f30-29daa0e52f73-kube-api-access-lmtxr\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dhhsr\" (UID: \"667432f0-bad4-4a31-9f30-29daa0e52f73\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.392449 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/667432f0-bad4-4a31-9f30-29daa0e52f73-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dhhsr\" (UID: \"667432f0-bad4-4a31-9f30-29daa0e52f73\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.392517 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/667432f0-bad4-4a31-9f30-29daa0e52f73-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dhhsr\" (UID: \"667432f0-bad4-4a31-9f30-29daa0e52f73\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.398350 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/667432f0-bad4-4a31-9f30-29daa0e52f73-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dhhsr\" (UID: \"667432f0-bad4-4a31-9f30-29daa0e52f73\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.408241 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/667432f0-bad4-4a31-9f30-29daa0e52f73-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dhhsr\" (UID: \"667432f0-bad4-4a31-9f30-29daa0e52f73\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.418812 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmtxr\" (UniqueName: \"kubernetes.io/projected/667432f0-bad4-4a31-9f30-29daa0e52f73-kube-api-access-lmtxr\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dhhsr\" (UID: \"667432f0-bad4-4a31-9f30-29daa0e52f73\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" Dec 10 11:12:28 crc kubenswrapper[4682]: I1210 11:12:28.627178 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" Dec 10 11:12:29 crc kubenswrapper[4682]: W1210 11:12:29.164568 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod667432f0_bad4_4a31_9f30_29daa0e52f73.slice/crio-13e16d6dec0498d12ceb18d520053f573916af5b607e786121507cf5ccb4d9bd WatchSource:0}: Error finding container 13e16d6dec0498d12ceb18d520053f573916af5b607e786121507cf5ccb4d9bd: Status 404 returned error can't find the container with id 13e16d6dec0498d12ceb18d520053f573916af5b607e786121507cf5ccb4d9bd Dec 10 11:12:29 crc kubenswrapper[4682]: I1210 11:12:29.166121 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr"] Dec 10 11:12:30 crc kubenswrapper[4682]: I1210 11:12:30.159862 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" event={"ID":"667432f0-bad4-4a31-9f30-29daa0e52f73","Type":"ContainerStarted","Data":"868662fb4ab1467d2e6d3c62cb808bcdb3616dc535c7bf473748c51417c32d87"} Dec 10 11:12:30 crc kubenswrapper[4682]: I1210 11:12:30.160164 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" event={"ID":"667432f0-bad4-4a31-9f30-29daa0e52f73","Type":"ContainerStarted","Data":"13e16d6dec0498d12ceb18d520053f573916af5b607e786121507cf5ccb4d9bd"} Dec 10 11:12:30 crc kubenswrapper[4682]: I1210 11:12:30.186906 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" podStartSLOduration=1.695823763 podStartE2EDuration="2.1868833s" podCreationTimestamp="2025-12-10 11:12:28 +0000 UTC" firstStartedPulling="2025-12-10 11:12:29.169815532 +0000 UTC m=+1629.490026292" lastFinishedPulling="2025-12-10 11:12:29.660875079 +0000 UTC m=+1629.981085829" observedRunningTime="2025-12-10 11:12:30.182401512 +0000 UTC m=+1630.502612282" watchObservedRunningTime="2025-12-10 11:12:30.1868833 +0000 UTC m=+1630.507094050" 
Dec 10 11:12:31 crc kubenswrapper[4682]: I1210 11:12:31.587938 4682 scope.go:117] "RemoveContainer" containerID="f6642c6e0e77eab4cf0f50f0ad9415f9d054c4805befea4b95b5565ffc58354a" Dec 10 11:12:31 crc kubenswrapper[4682]: I1210 11:12:31.639232 4682 scope.go:117] "RemoveContainer" containerID="c742871a2c0d5f66fbc3ab0a2aef5b3c2eefd2ab03c4b9dea0a1dd5f2d9fe256" Dec 10 11:12:31 crc kubenswrapper[4682]: I1210 11:12:31.681447 4682 scope.go:117] "RemoveContainer" containerID="58543ad41402e0ea26cbd88eae5fe41e99b76bf8f65a97dd2ec057a35488b6b5" Dec 10 11:12:31 crc kubenswrapper[4682]: I1210 11:12:31.732104 4682 scope.go:117] "RemoveContainer" containerID="3471b045b620bb2b34b9814fc9e46ad102fa41d37a049588531b2c75b6396389" Dec 10 11:12:31 crc kubenswrapper[4682]: I1210 11:12:31.757060 4682 scope.go:117] "RemoveContainer" containerID="91fdcc557b4d22e64f4b9de115191e912904338fe6401293a35d7d82e5808b58" Dec 10 11:12:31 crc kubenswrapper[4682]: I1210 11:12:31.776892 4682 scope.go:117] "RemoveContainer" containerID="52324fb463d9374fe418fc5517f460fb14e8b91050922a1ac5fb777a5aed90c6" Dec 10 11:12:31 crc kubenswrapper[4682]: I1210 11:12:31.801632 4682 scope.go:117] "RemoveContainer" containerID="ccaf461857083b4af5f1e513a4962ed287e2efd7f9c51889f6188b6ad28e59bc" Dec 10 11:12:32 crc kubenswrapper[4682]: I1210 11:12:32.381605 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:12:32 crc kubenswrapper[4682]: E1210 11:12:32.381832 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:12:32 crc kubenswrapper[4682]: E1210 11:12:32.382838 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:12:33 crc kubenswrapper[4682]: I1210 11:12:33.222603 4682 generic.go:334] "Generic (PLEG): container finished" podID="667432f0-bad4-4a31-9f30-29daa0e52f73" containerID="868662fb4ab1467d2e6d3c62cb808bcdb3616dc535c7bf473748c51417c32d87" exitCode=0 Dec 10 11:12:33 crc kubenswrapper[4682]: I1210 11:12:33.222647 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" event={"ID":"667432f0-bad4-4a31-9f30-29daa0e52f73","Type":"ContainerDied","Data":"868662fb4ab1467d2e6d3c62cb808bcdb3616dc535c7bf473748c51417c32d87"} Dec 10 11:12:34 crc kubenswrapper[4682]: I1210 11:12:34.719622 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" Dec 10 11:12:34 crc kubenswrapper[4682]: I1210 11:12:34.874262 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmtxr\" (UniqueName: \"kubernetes.io/projected/667432f0-bad4-4a31-9f30-29daa0e52f73-kube-api-access-lmtxr\") pod \"667432f0-bad4-4a31-9f30-29daa0e52f73\" (UID: \"667432f0-bad4-4a31-9f30-29daa0e52f73\") " Dec 10 11:12:34 crc kubenswrapper[4682]: I1210 11:12:34.874357 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/667432f0-bad4-4a31-9f30-29daa0e52f73-inventory\") pod \"667432f0-bad4-4a31-9f30-29daa0e52f73\" (UID: \"667432f0-bad4-4a31-9f30-29daa0e52f73\") " Dec 10 11:12:34 crc kubenswrapper[4682]: I1210 11:12:34.874375 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/667432f0-bad4-4a31-9f30-29daa0e52f73-ssh-key\") pod \"667432f0-bad4-4a31-9f30-29daa0e52f73\" (UID: \"667432f0-bad4-4a31-9f30-29daa0e52f73\") " Dec 10 11:12:34 crc kubenswrapper[4682]: I1210 11:12:34.879523 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/667432f0-bad4-4a31-9f30-29daa0e52f73-kube-api-access-lmtxr" (OuterVolumeSpecName: "kube-api-access-lmtxr") pod "667432f0-bad4-4a31-9f30-29daa0e52f73" (UID: "667432f0-bad4-4a31-9f30-29daa0e52f73"). InnerVolumeSpecName "kube-api-access-lmtxr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:12:34 crc kubenswrapper[4682]: I1210 11:12:34.901581 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/667432f0-bad4-4a31-9f30-29daa0e52f73-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "667432f0-bad4-4a31-9f30-29daa0e52f73" (UID: "667432f0-bad4-4a31-9f30-29daa0e52f73"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:12:34 crc kubenswrapper[4682]: I1210 11:12:34.903431 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/667432f0-bad4-4a31-9f30-29daa0e52f73-inventory" (OuterVolumeSpecName: "inventory") pod "667432f0-bad4-4a31-9f30-29daa0e52f73" (UID: "667432f0-bad4-4a31-9f30-29daa0e52f73"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:12:34 crc kubenswrapper[4682]: I1210 11:12:34.982842 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmtxr\" (UniqueName: \"kubernetes.io/projected/667432f0-bad4-4a31-9f30-29daa0e52f73-kube-api-access-lmtxr\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:34 crc kubenswrapper[4682]: I1210 11:12:34.982884 4682 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/667432f0-bad4-4a31-9f30-29daa0e52f73-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:34 crc kubenswrapper[4682]: I1210 11:12:34.982907 4682 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/667432f0-bad4-4a31-9f30-29daa0e52f73-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.247225 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" event={"ID":"667432f0-bad4-4a31-9f30-29daa0e52f73","Type":"ContainerDied","Data":"13e16d6dec0498d12ceb18d520053f573916af5b607e786121507cf5ccb4d9bd"} Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.247558 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13e16d6dec0498d12ceb18d520053f573916af5b607e786121507cf5ccb4d9bd" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.247280 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dhhsr" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.382865 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm"] Dec 10 11:12:35 crc kubenswrapper[4682]: E1210 11:12:35.383328 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="667432f0-bad4-4a31-9f30-29daa0e52f73" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.383348 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="667432f0-bad4-4a31-9f30-29daa0e52f73" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.383650 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="667432f0-bad4-4a31-9f30-29daa0e52f73" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.384383 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.388927 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.389158 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.389529 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.389625 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-tln2g" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.397532 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm"] Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.492694 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.492927 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.493647 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zl2wd\" (UniqueName: \"kubernetes.io/projected/a02eab3d-1fa5-4960-bf40-d9822a5c9122-kube-api-access-zl2wd\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.493880 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.595727 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zl2wd\" (UniqueName: \"kubernetes.io/projected/a02eab3d-1fa5-4960-bf40-d9822a5c9122-kube-api-access-zl2wd\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.595807 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-bootstrap-combined-ca-bundle\") 
pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.595871 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.595960 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.600589 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.600764 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.600919 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.614155 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zl2wd\" (UniqueName: \"kubernetes.io/projected/a02eab3d-1fa5-4960-bf40-d9822a5c9122-kube-api-access-zl2wd\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:12:35 crc kubenswrapper[4682]: I1210 11:12:35.711062 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:12:36 crc kubenswrapper[4682]: I1210 11:12:36.284928 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:12:36 crc kubenswrapper[4682]: I1210 11:12:36.285090 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm"] Dec 10 11:12:37 crc kubenswrapper[4682]: I1210 11:12:37.266039 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" event={"ID":"a02eab3d-1fa5-4960-bf40-d9822a5c9122","Type":"ContainerStarted","Data":"49ddc0cfb4b868b82c10ef7ce289d76e321f7835d9ed1bb74499859717de04ab"} Dec 10 11:12:37 crc kubenswrapper[4682]: E1210 11:12:37.500455 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:12:37 crc kubenswrapper[4682]: E1210 11:12:37.500814 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:12:37 crc kubenswrapper[4682]: E1210 11:12:37.500993 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:12:37 crc kubenswrapper[4682]: E1210 11:12:37.502235 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:12:39 crc kubenswrapper[4682]: I1210 11:12:39.293929 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" event={"ID":"a02eab3d-1fa5-4960-bf40-d9822a5c9122","Type":"ContainerStarted","Data":"18b840b95734cc6d3aebb002b0192f686f7cb12db4d57a581f67e80d42456c77"} Dec 10 11:12:39 crc kubenswrapper[4682]: I1210 11:12:39.319330 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" podStartSLOduration=1.596939852 podStartE2EDuration="4.319309813s" podCreationTimestamp="2025-12-10 11:12:35 +0000 UTC" firstStartedPulling="2025-12-10 11:12:36.284697685 +0000 UTC m=+1636.604908445" lastFinishedPulling="2025-12-10 11:12:39.007067656 +0000 UTC m=+1639.327278406" observedRunningTime="2025-12-10 11:12:39.309294903 +0000 UTC m=+1639.629505663" watchObservedRunningTime="2025-12-10 11:12:39.319309813 +0000 UTC m=+1639.639520583" Dec 10 11:12:46 crc kubenswrapper[4682]: I1210 11:12:46.381537 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:12:46 crc kubenswrapper[4682]: E1210 11:12:46.382677 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:12:47 crc kubenswrapper[4682]: E1210 11:12:47.519522 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:12:47 crc kubenswrapper[4682]: E1210 11:12:47.519864 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:12:47 crc kubenswrapper[4682]: E1210 11:12:47.520077 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:12:47 crc kubenswrapper[4682]: E1210 11:12:47.521301 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:12:48 crc kubenswrapper[4682]: E1210 11:12:48.382757 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:12:58 crc kubenswrapper[4682]: E1210 11:12:58.383871 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:13:00 crc kubenswrapper[4682]: I1210 11:13:00.584269 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:13:00 crc kubenswrapper[4682]: E1210 11:13:00.590655 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:13:02 crc kubenswrapper[4682]: E1210 11:13:02.383663 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:13:09 crc kubenswrapper[4682]: E1210 11:13:09.385280 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:13:15 crc kubenswrapper[4682]: I1210 11:13:15.382107 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:13:15 crc kubenswrapper[4682]: E1210 11:13:15.382926 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:13:15 crc kubenswrapper[4682]: E1210 11:13:15.383403 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:13:23 crc kubenswrapper[4682]: E1210 
11:13:23.383254 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:13:27 crc kubenswrapper[4682]: I1210 11:13:27.381435 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:13:27 crc kubenswrapper[4682]: E1210 11:13:27.383950 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:13:27 crc kubenswrapper[4682]: E1210 11:13:27.383994 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:13:31 crc kubenswrapper[4682]: I1210 11:13:31.990183 4682 scope.go:117] "RemoveContainer" containerID="45818218304beb7ae2a3a3ceada5cb07bd1e5c66b3ecda6a0ae2e3fe59274616" Dec 10 11:13:38 crc kubenswrapper[4682]: E1210 11:13:38.383414 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:13:38 crc kubenswrapper[4682]: E1210 11:13:38.383602 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:13:40 crc kubenswrapper[4682]: I1210 11:13:40.394174 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:13:40 crc kubenswrapper[4682]: E1210 11:13:40.394755 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:13:49 crc kubenswrapper[4682]: E1210 11:13:49.382973 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:13:51 crc 
kubenswrapper[4682]: I1210 11:13:51.380977 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:13:51 crc kubenswrapper[4682]: E1210 11:13:51.381524 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:13:53 crc kubenswrapper[4682]: E1210 11:13:53.385708 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:14:00 crc kubenswrapper[4682]: E1210 11:14:00.449521 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:14:02 crc kubenswrapper[4682]: I1210 11:14:02.381450 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:14:02 crc kubenswrapper[4682]: E1210 11:14:02.382044 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:14:06 crc kubenswrapper[4682]: E1210 11:14:06.476060 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:14:06 crc kubenswrapper[4682]: E1210 11:14:06.476726 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:14:06 crc kubenswrapper[4682]: E1210 11:14:06.476871 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:14:06 crc kubenswrapper[4682]: E1210 11:14:06.478569 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:14:12 crc kubenswrapper[4682]: E1210 11:14:12.495809 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:14:12 crc kubenswrapper[4682]: E1210 11:14:12.497296 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:14:12 crc kubenswrapper[4682]: E1210 11:14:12.497565 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:14:12 crc kubenswrapper[4682]: E1210 11:14:12.499002 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:14:16 crc kubenswrapper[4682]: I1210 11:14:16.381092 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:14:16 crc kubenswrapper[4682]: E1210 11:14:16.381866 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:14:20 crc kubenswrapper[4682]: E1210 11:14:20.395734 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:14:26 crc kubenswrapper[4682]: E1210 11:14:26.383036 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:14:29 crc kubenswrapper[4682]: I1210 11:14:29.382015 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:14:29 crc 
kubenswrapper[4682]: E1210 11:14:29.382809 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:14:32 crc kubenswrapper[4682]: I1210 11:14:32.069376 4682 scope.go:117] "RemoveContainer" containerID="01c6c916c8fadf7544df60c05f656772683cab4e119c8feb2b2152095d7dfeef" Dec 10 11:14:32 crc kubenswrapper[4682]: I1210 11:14:32.103994 4682 scope.go:117] "RemoveContainer" containerID="cb98015b0a07b29eeb84dcd3e32e36e10eb4cbba2c2f5efe9c63e6094c517ac9" Dec 10 11:14:32 crc kubenswrapper[4682]: I1210 11:14:32.194513 4682 scope.go:117] "RemoveContainer" containerID="5372924aaca2b14f3b73b5b5084ec5396fc73d31daa2f696b56cc0f324d1dae7" Dec 10 11:14:33 crc kubenswrapper[4682]: E1210 11:14:33.384954 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:14:38 crc kubenswrapper[4682]: E1210 11:14:38.386877 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:14:40 crc kubenswrapper[4682]: I1210 11:14:40.394444 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:14:40 crc kubenswrapper[4682]: E1210 11:14:40.395357 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:14:44 crc kubenswrapper[4682]: E1210 11:14:44.382823 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:14:49 crc kubenswrapper[4682]: E1210 11:14:49.384032 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:14:52 crc kubenswrapper[4682]: I1210 11:14:52.380925 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:14:52 crc kubenswrapper[4682]: E1210 
11:14:52.381456 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:14:56 crc kubenswrapper[4682]: E1210 11:14:56.383070 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.154239 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6"] Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.156457 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.159373 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.159636 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.167454 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6"] Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.169130 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-config-volume\") pod \"collect-profiles-29422755-p9dh6\" (UID: \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.169291 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9hmg\" (UniqueName: \"kubernetes.io/projected/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-kube-api-access-s9hmg\") pod \"collect-profiles-29422755-p9dh6\" (UID: \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.169383 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-secret-volume\") pod \"collect-profiles-29422755-p9dh6\" (UID: \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.270838 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9hmg\" (UniqueName: \"kubernetes.io/projected/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-kube-api-access-s9hmg\") pod \"collect-profiles-29422755-p9dh6\" (UID: \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.271268 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-secret-volume\") pod \"collect-profiles-29422755-p9dh6\" (UID: \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.271390 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-config-volume\") pod \"collect-profiles-29422755-p9dh6\" (UID: \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.272363 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-config-volume\") pod \"collect-profiles-29422755-p9dh6\" (UID: \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.283717 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-secret-volume\") pod \"collect-profiles-29422755-p9dh6\" (UID: \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.291598 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9hmg\" (UniqueName: \"kubernetes.io/projected/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-kube-api-access-s9hmg\") pod \"collect-profiles-29422755-p9dh6\" (UID: \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" Dec 10 11:15:00 crc kubenswrapper[4682]: E1210 11:15:00.392898 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.483092 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.922017 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6"] Dec 10 11:15:00 crc kubenswrapper[4682]: I1210 11:15:00.959388 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" event={"ID":"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e","Type":"ContainerStarted","Data":"11dbc91dccf20b4fcab3b6b770793daa02f1358a1ef49fe2e4eceea3ae38368d"} Dec 10 11:15:01 crc kubenswrapper[4682]: I1210 11:15:01.970264 4682 generic.go:334] "Generic (PLEG): container finished" podID="8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e" containerID="91fcf5bb8900f93cf03724fa918cacfd36601d4841d1a4e73e322d60080f6b04" exitCode=0 Dec 10 11:15:01 crc kubenswrapper[4682]: I1210 11:15:01.970439 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" event={"ID":"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e","Type":"ContainerDied","Data":"91fcf5bb8900f93cf03724fa918cacfd36601d4841d1a4e73e322d60080f6b04"} Dec 10 11:15:03 crc kubenswrapper[4682]: I1210 11:15:03.381513 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:15:03 crc kubenswrapper[4682]: E1210 11:15:03.382046 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:15:03 crc kubenswrapper[4682]: I1210 11:15:03.477427 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" Dec 10 11:15:03 crc kubenswrapper[4682]: I1210 11:15:03.565796 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-config-volume\") pod \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\" (UID: \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\") " Dec 10 11:15:03 crc kubenswrapper[4682]: I1210 11:15:03.565867 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-secret-volume\") pod \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\" (UID: \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\") " Dec 10 11:15:03 crc kubenswrapper[4682]: I1210 11:15:03.566011 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9hmg\" (UniqueName: \"kubernetes.io/projected/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-kube-api-access-s9hmg\") pod \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\" (UID: \"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e\") " Dec 10 11:15:03 crc kubenswrapper[4682]: I1210 11:15:03.566529 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-config-volume" (OuterVolumeSpecName: "config-volume") pod "8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e" (UID: "8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e"). 
InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:15:03 crc kubenswrapper[4682]: I1210 11:15:03.566727 4682 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:03 crc kubenswrapper[4682]: I1210 11:15:03.571495 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-kube-api-access-s9hmg" (OuterVolumeSpecName: "kube-api-access-s9hmg") pod "8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e" (UID: "8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e"). InnerVolumeSpecName "kube-api-access-s9hmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:15:03 crc kubenswrapper[4682]: I1210 11:15:03.571680 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e" (UID: "8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:03 crc kubenswrapper[4682]: I1210 11:15:03.669718 4682 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:03 crc kubenswrapper[4682]: I1210 11:15:03.669758 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9hmg\" (UniqueName: \"kubernetes.io/projected/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e-kube-api-access-s9hmg\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:04 crc kubenswrapper[4682]: I1210 11:15:04.005697 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" event={"ID":"8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e","Type":"ContainerDied","Data":"11dbc91dccf20b4fcab3b6b770793daa02f1358a1ef49fe2e4eceea3ae38368d"} Dec 10 11:15:04 crc kubenswrapper[4682]: I1210 11:15:04.005736 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="11dbc91dccf20b4fcab3b6b770793daa02f1358a1ef49fe2e4eceea3ae38368d" Dec 10 11:15:04 crc kubenswrapper[4682]: I1210 11:15:04.005804 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6" Dec 10 11:15:09 crc kubenswrapper[4682]: E1210 11:15:09.384990 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:15:12 crc kubenswrapper[4682]: E1210 11:15:12.383336 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:15:14 crc kubenswrapper[4682]: I1210 11:15:14.381998 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:15:14 crc kubenswrapper[4682]: E1210 11:15:14.382330 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:15:22 crc kubenswrapper[4682]: E1210 11:15:22.384057 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:15:25 crc kubenswrapper[4682]: E1210 11:15:25.393993 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:15:28 crc kubenswrapper[4682]: I1210 11:15:28.381651 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:15:28 crc kubenswrapper[4682]: E1210 11:15:28.382215 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:15:35 crc kubenswrapper[4682]: E1210 11:15:35.382333 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:15:40 crc kubenswrapper[4682]: I1210 
11:15:40.393316 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:15:40 crc kubenswrapper[4682]: E1210 11:15:40.394157 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:15:40 crc kubenswrapper[4682]: E1210 11:15:40.395052 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:15:47 crc kubenswrapper[4682]: E1210 11:15:47.383077 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:15:48 crc kubenswrapper[4682]: I1210 11:15:48.498408 4682 generic.go:334] "Generic (PLEG): container finished" podID="a02eab3d-1fa5-4960-bf40-d9822a5c9122" containerID="18b840b95734cc6d3aebb002b0192f686f7cb12db4d57a581f67e80d42456c77" exitCode=0 Dec 10 11:15:48 crc kubenswrapper[4682]: I1210 11:15:48.498470 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" event={"ID":"a02eab3d-1fa5-4960-bf40-d9822a5c9122","Type":"ContainerDied","Data":"18b840b95734cc6d3aebb002b0192f686f7cb12db4d57a581f67e80d42456c77"} Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.060706 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.179233 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-inventory\") pod \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.179373 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-ssh-key\") pod \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.179580 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zl2wd\" (UniqueName: \"kubernetes.io/projected/a02eab3d-1fa5-4960-bf40-d9822a5c9122-kube-api-access-zl2wd\") pod \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.179657 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-bootstrap-combined-ca-bundle\") pod \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\" (UID: \"a02eab3d-1fa5-4960-bf40-d9822a5c9122\") " Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.185321 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a02eab3d-1fa5-4960-bf40-d9822a5c9122-kube-api-access-zl2wd" (OuterVolumeSpecName: "kube-api-access-zl2wd") pod "a02eab3d-1fa5-4960-bf40-d9822a5c9122" (UID: "a02eab3d-1fa5-4960-bf40-d9822a5c9122"). InnerVolumeSpecName "kube-api-access-zl2wd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.190680 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "a02eab3d-1fa5-4960-bf40-d9822a5c9122" (UID: "a02eab3d-1fa5-4960-bf40-d9822a5c9122"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.207714 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-inventory" (OuterVolumeSpecName: "inventory") pod "a02eab3d-1fa5-4960-bf40-d9822a5c9122" (UID: "a02eab3d-1fa5-4960-bf40-d9822a5c9122"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.212733 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a02eab3d-1fa5-4960-bf40-d9822a5c9122" (UID: "a02eab3d-1fa5-4960-bf40-d9822a5c9122"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.282390 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zl2wd\" (UniqueName: \"kubernetes.io/projected/a02eab3d-1fa5-4960-bf40-d9822a5c9122-kube-api-access-zl2wd\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.282420 4682 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.282431 4682 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.282439 4682 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a02eab3d-1fa5-4960-bf40-d9822a5c9122-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.523002 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" event={"ID":"a02eab3d-1fa5-4960-bf40-d9822a5c9122","Type":"ContainerDied","Data":"49ddc0cfb4b868b82c10ef7ce289d76e321f7835d9ed1bb74499859717de04ab"} Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.523046 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49ddc0cfb4b868b82c10ef7ce289d76e321f7835d9ed1bb74499859717de04ab" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.523056 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.637025 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v"] Dec 10 11:15:50 crc kubenswrapper[4682]: E1210 11:15:50.637525 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e" containerName="collect-profiles" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.637544 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e" containerName="collect-profiles" Dec 10 11:15:50 crc kubenswrapper[4682]: E1210 11:15:50.637567 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a02eab3d-1fa5-4960-bf40-d9822a5c9122" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.637578 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="a02eab3d-1fa5-4960-bf40-d9822a5c9122" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.637958 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e" containerName="collect-profiles" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.637989 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="a02eab3d-1fa5-4960-bf40-d9822a5c9122" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.638893 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.640729 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.641923 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.642353 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.650059 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-tln2g" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.658357 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v"] Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.792989 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fspbc\" (UniqueName: \"kubernetes.io/projected/df88b6db-13a9-4d76-a9da-e259ef1f79a2-kube-api-access-fspbc\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-49h6v\" (UID: \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.793244 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/df88b6db-13a9-4d76-a9da-e259ef1f79a2-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-49h6v\" (UID: \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.793333 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/df88b6db-13a9-4d76-a9da-e259ef1f79a2-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-49h6v\" (UID: \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.894905 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fspbc\" (UniqueName: \"kubernetes.io/projected/df88b6db-13a9-4d76-a9da-e259ef1f79a2-kube-api-access-fspbc\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-49h6v\" (UID: \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.895054 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/df88b6db-13a9-4d76-a9da-e259ef1f79a2-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-49h6v\" (UID: \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.895112 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/df88b6db-13a9-4d76-a9da-e259ef1f79a2-ssh-key\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-49h6v\" (UID: \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.898541 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/df88b6db-13a9-4d76-a9da-e259ef1f79a2-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-49h6v\" (UID: \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.898755 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/df88b6db-13a9-4d76-a9da-e259ef1f79a2-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-49h6v\" (UID: \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.912022 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fspbc\" (UniqueName: \"kubernetes.io/projected/df88b6db-13a9-4d76-a9da-e259ef1f79a2-kube-api-access-fspbc\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-49h6v\" (UID: \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" Dec 10 11:15:50 crc kubenswrapper[4682]: I1210 11:15:50.964093 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" Dec 10 11:15:51 crc kubenswrapper[4682]: E1210 11:15:51.382892 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:15:51 crc kubenswrapper[4682]: I1210 11:15:51.484370 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v"] Dec 10 11:15:51 crc kubenswrapper[4682]: I1210 11:15:51.537002 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" event={"ID":"df88b6db-13a9-4d76-a9da-e259ef1f79a2","Type":"ContainerStarted","Data":"3866b4ead541a51109967f12e3252cc6dd31959e535de052e6957249df59d190"} Dec 10 11:15:52 crc kubenswrapper[4682]: I1210 11:15:52.548100 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" event={"ID":"df88b6db-13a9-4d76-a9da-e259ef1f79a2","Type":"ContainerStarted","Data":"618c4ef5c6e030299c8a389db8c234467e4516465f7514e3183d44b4d3695ae0"} Dec 10 11:15:52 crc kubenswrapper[4682]: I1210 11:15:52.561623 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" podStartSLOduration=2.084565251 podStartE2EDuration="2.561604412s" podCreationTimestamp="2025-12-10 11:15:50 +0000 UTC" firstStartedPulling="2025-12-10 11:15:51.481884825 +0000 UTC m=+1831.802095565" lastFinishedPulling="2025-12-10 11:15:51.958923976 +0000 UTC m=+1832.279134726" observedRunningTime="2025-12-10 11:15:52.559756455 +0000 UTC m=+1832.879967205" 
watchObservedRunningTime="2025-12-10 11:15:52.561604412 +0000 UTC m=+1832.881815162" Dec 10 11:15:55 crc kubenswrapper[4682]: I1210 11:15:55.381039 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:15:55 crc kubenswrapper[4682]: E1210 11:15:55.382152 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:15:59 crc kubenswrapper[4682]: E1210 11:15:59.385267 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:16:06 crc kubenswrapper[4682]: E1210 11:16:06.383284 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:16:09 crc kubenswrapper[4682]: I1210 11:16:09.380875 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:16:09 crc kubenswrapper[4682]: E1210 11:16:09.381988 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:16:13 crc kubenswrapper[4682]: I1210 11:16:13.054074 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-1423-account-create-update-lzxsx"] Dec 10 11:16:13 crc kubenswrapper[4682]: I1210 11:16:13.067707 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-c2zgs"] Dec 10 11:16:13 crc kubenswrapper[4682]: I1210 11:16:13.076575 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-c2zgs"] Dec 10 11:16:13 crc kubenswrapper[4682]: I1210 11:16:13.085044 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-1423-account-create-update-lzxsx"] Dec 10 11:16:13 crc kubenswrapper[4682]: E1210 11:16:13.384241 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:16:14 crc kubenswrapper[4682]: I1210 11:16:14.395463 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="722a4519-42a8-4f50-8665-59e8bb94a134" 
path="/var/lib/kubelet/pods/722a4519-42a8-4f50-8665-59e8bb94a134/volumes" Dec 10 11:16:14 crc kubenswrapper[4682]: I1210 11:16:14.398091 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dba42c85-eeda-4249-8026-6581d57f8dcf" path="/var/lib/kubelet/pods/dba42c85-eeda-4249-8026-6581d57f8dcf/volumes" Dec 10 11:16:17 crc kubenswrapper[4682]: I1210 11:16:17.048886 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-49tzp"] Dec 10 11:16:17 crc kubenswrapper[4682]: I1210 11:16:17.062527 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-d60a-account-create-update-cf5pj"] Dec 10 11:16:17 crc kubenswrapper[4682]: I1210 11:16:17.072308 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-49tzp"] Dec 10 11:16:17 crc kubenswrapper[4682]: I1210 11:16:17.080657 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-d60a-account-create-update-cf5pj"] Dec 10 11:16:18 crc kubenswrapper[4682]: I1210 11:16:18.035795 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-jrfpg"] Dec 10 11:16:18 crc kubenswrapper[4682]: I1210 11:16:18.056312 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-b00f-account-create-update-gvhl8"] Dec 10 11:16:18 crc kubenswrapper[4682]: I1210 11:16:18.065606 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-jrfpg"] Dec 10 11:16:18 crc kubenswrapper[4682]: I1210 11:16:18.074261 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-b00f-account-create-update-gvhl8"] Dec 10 11:16:18 crc kubenswrapper[4682]: E1210 11:16:18.382641 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:16:18 crc kubenswrapper[4682]: I1210 11:16:18.394097 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dfa068f-e434-4d53-97c8-44f153f4847f" path="/var/lib/kubelet/pods/0dfa068f-e434-4d53-97c8-44f153f4847f/volumes" Dec 10 11:16:18 crc kubenswrapper[4682]: I1210 11:16:18.394734 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10c2d035-3ea8-46a7-9380-0bbe5d729bfe" path="/var/lib/kubelet/pods/10c2d035-3ea8-46a7-9380-0bbe5d729bfe/volumes" Dec 10 11:16:18 crc kubenswrapper[4682]: I1210 11:16:18.395377 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a292c622-0bd6-436d-95b4-8ca5e643fe10" path="/var/lib/kubelet/pods/a292c622-0bd6-436d-95b4-8ca5e643fe10/volumes" Dec 10 11:16:18 crc kubenswrapper[4682]: I1210 11:16:18.396046 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa69a882-93ca-452f-9be6-2efc7b53f838" path="/var/lib/kubelet/pods/fa69a882-93ca-452f-9be6-2efc7b53f838/volumes" Dec 10 11:16:20 crc kubenswrapper[4682]: I1210 11:16:20.390242 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:16:20 crc kubenswrapper[4682]: E1210 11:16:20.390918 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:16:26 crc kubenswrapper[4682]: I1210 11:16:26.028518 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-5vf6p"] Dec 10 11:16:26 crc kubenswrapper[4682]: I1210 11:16:26.062023 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-5vf6p"] Dec 10 11:16:26 crc kubenswrapper[4682]: I1210 11:16:26.393759 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81a956dc-54cb-4eb1-8ac2-996a66eca415" path="/var/lib/kubelet/pods/81a956dc-54cb-4eb1-8ac2-996a66eca415/volumes" Dec 10 11:16:27 crc kubenswrapper[4682]: I1210 11:16:27.032197 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-b63b-account-create-update-7tx96"] Dec 10 11:16:27 crc kubenswrapper[4682]: I1210 11:16:27.042502 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-fa63-account-create-update-pb8d8"] Dec 10 11:16:27 crc kubenswrapper[4682]: I1210 11:16:27.052806 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-fa63-account-create-update-pb8d8"] Dec 10 11:16:27 crc kubenswrapper[4682]: I1210 11:16:27.062280 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-b63b-account-create-update-7tx96"] Dec 10 11:16:27 crc kubenswrapper[4682]: E1210 11:16:27.382771 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.038311 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-bxmng"] Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.047449 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-9b10-account-create-update-p68fr"] Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.064607 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-bxmng"] Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.073196 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-04dc-account-create-update-ddvxc"] Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.082804 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-db-create-pglvk"] Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.090784 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cloudkitty-04dc-account-create-update-ddvxc"] Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.098669 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-9b10-account-create-update-p68fr"] Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.106528 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-54zjl"] Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.114307 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cloudkitty-db-create-pglvk"] Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.122270 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/neutron-db-create-54zjl"] Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.394428 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="241a5a1d-b18b-4151-9aa9-81d82d723700" path="/var/lib/kubelet/pods/241a5a1d-b18b-4151-9aa9-81d82d723700/volumes" Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.395780 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24b52051-08ad-426d-a9d4-23465f022f28" path="/var/lib/kubelet/pods/24b52051-08ad-426d-a9d4-23465f022f28/volumes" Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.397677 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e662a28-3dfe-43c7-a368-ea48cd6867a8" path="/var/lib/kubelet/pods/2e662a28-3dfe-43c7-a368-ea48cd6867a8/volumes" Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.398541 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="433ef90f-139b-4a60-918b-ef0a226ee731" path="/var/lib/kubelet/pods/433ef90f-139b-4a60-918b-ef0a226ee731/volumes" Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.399665 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e45654e-91af-4171-a0f7-e15eac1a40e9" path="/var/lib/kubelet/pods/4e45654e-91af-4171-a0f7-e15eac1a40e9/volumes" Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.400416 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86b9680d-68e4-4b89-bf5a-4925464b50ef" path="/var/lib/kubelet/pods/86b9680d-68e4-4b89-bf5a-4925464b50ef/volumes" Dec 10 11:16:28 crc kubenswrapper[4682]: I1210 11:16:28.401179 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6473447-e97b-4ce1-bc48-0028c9ac3444" path="/var/lib/kubelet/pods/d6473447-e97b-4ce1-bc48-0028c9ac3444/volumes" Dec 10 11:16:31 crc kubenswrapper[4682]: I1210 11:16:31.381589 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:16:31 crc kubenswrapper[4682]: E1210 11:16:31.384256 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:16:32 crc kubenswrapper[4682]: I1210 11:16:32.305962 4682 scope.go:117] "RemoveContainer" containerID="75a175c83f62d943fb078473583e8fe4405c08b5eb3594c17433a8ccba984aa2" Dec 10 11:16:32 crc kubenswrapper[4682]: I1210 11:16:32.331488 4682 scope.go:117] "RemoveContainer" containerID="b0ac107c437020dc84de66755d26373d3720e923f47e2889728f6796ef1812bd" Dec 10 11:16:32 crc kubenswrapper[4682]: I1210 11:16:32.386616 4682 scope.go:117] "RemoveContainer" containerID="b228321dc617cce4be5adeb972528a59d65c553ab5ee47201cc1d98e5861fd6a" Dec 10 11:16:32 crc kubenswrapper[4682]: I1210 11:16:32.435922 4682 scope.go:117] "RemoveContainer" containerID="3acb44d1538c9f2a339233094a9bed57332635b97b9cbec3d21db0c657f086c9" Dec 10 11:16:32 crc kubenswrapper[4682]: I1210 11:16:32.484373 4682 scope.go:117] "RemoveContainer" containerID="1b76cc41cf90775abb0f780a203fa619cb5730b0315f5693ad956531adcfe62c" Dec 10 11:16:32 crc kubenswrapper[4682]: I1210 11:16:32.530092 4682 scope.go:117] "RemoveContainer" containerID="80c7e389620ac0aba07b3ce5cedc34d39ecc3e82d31c2ce1098a9e017f4aa39c" Dec 10 11:16:32 
crc kubenswrapper[4682]: I1210 11:16:32.568659 4682 scope.go:117] "RemoveContainer" containerID="539b09b199ebb47b12ad99cc7094997abb38ce6bb0a7f6702141dccbcdb76a57" Dec 10 11:16:32 crc kubenswrapper[4682]: I1210 11:16:32.594318 4682 scope.go:117] "RemoveContainer" containerID="af8497874b71553ed16ea052b1911ff74b1640215571ae0d6342ed48e7e50519" Dec 10 11:16:32 crc kubenswrapper[4682]: I1210 11:16:32.621732 4682 scope.go:117] "RemoveContainer" containerID="7ffd089fffcbb868d1c5881c31e2ad6b83a45058cdc7a29a52aa910fc962f198" Dec 10 11:16:32 crc kubenswrapper[4682]: I1210 11:16:32.663661 4682 scope.go:117] "RemoveContainer" containerID="41aa57bd613f104130b3ebf0d4c9dff97a5c58ee309d7e345ef165f15231db4b" Dec 10 11:16:32 crc kubenswrapper[4682]: I1210 11:16:32.719365 4682 scope.go:117] "RemoveContainer" containerID="5b338ebd6437cc21ebc24eb3f749ae52269999a44ad36854df37c8f13d8ef6a0" Dec 10 11:16:32 crc kubenswrapper[4682]: I1210 11:16:32.763025 4682 scope.go:117] "RemoveContainer" containerID="e89792f03ca32052e3cc2473918637ed39251ba9ca9da8d48a2cf33e117c3ce7" Dec 10 11:16:32 crc kubenswrapper[4682]: I1210 11:16:32.797090 4682 scope.go:117] "RemoveContainer" containerID="56b58b48aac3c94f010eec728f4bc3e8103d8d4ccdcbc07059fd76caf5049765" Dec 10 11:16:32 crc kubenswrapper[4682]: I1210 11:16:32.814816 4682 scope.go:117] "RemoveContainer" containerID="68600279b2804549dc5c231bf815ff7e8fe46dd4cfd7c8000298b8e88ed54989" Dec 10 11:16:33 crc kubenswrapper[4682]: E1210 11:16:33.382802 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:16:37 crc kubenswrapper[4682]: I1210 11:16:37.037608 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pspsq"] Dec 10 11:16:37 crc kubenswrapper[4682]: I1210 11:16:37.042177 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:37 crc kubenswrapper[4682]: I1210 11:16:37.055179 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pspsq"] Dec 10 11:16:37 crc kubenswrapper[4682]: I1210 11:16:37.214910 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b532b8-9d48-436c-acd3-85999347507d-catalog-content\") pod \"community-operators-pspsq\" (UID: \"83b532b8-9d48-436c-acd3-85999347507d\") " pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:37 crc kubenswrapper[4682]: I1210 11:16:37.214972 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b532b8-9d48-436c-acd3-85999347507d-utilities\") pod \"community-operators-pspsq\" (UID: \"83b532b8-9d48-436c-acd3-85999347507d\") " pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:37 crc kubenswrapper[4682]: I1210 11:16:37.215025 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4x729\" (UniqueName: \"kubernetes.io/projected/83b532b8-9d48-436c-acd3-85999347507d-kube-api-access-4x729\") pod \"community-operators-pspsq\" (UID: \"83b532b8-9d48-436c-acd3-85999347507d\") " pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:37 crc kubenswrapper[4682]: I1210 11:16:37.316741 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b532b8-9d48-436c-acd3-85999347507d-utilities\") pod \"community-operators-pspsq\" (UID: \"83b532b8-9d48-436c-acd3-85999347507d\") " pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:37 crc kubenswrapper[4682]: I1210 11:16:37.316836 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4x729\" (UniqueName: \"kubernetes.io/projected/83b532b8-9d48-436c-acd3-85999347507d-kube-api-access-4x729\") pod \"community-operators-pspsq\" (UID: \"83b532b8-9d48-436c-acd3-85999347507d\") " pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:37 crc kubenswrapper[4682]: I1210 11:16:37.316960 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b532b8-9d48-436c-acd3-85999347507d-catalog-content\") pod \"community-operators-pspsq\" (UID: \"83b532b8-9d48-436c-acd3-85999347507d\") " pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:37 crc kubenswrapper[4682]: I1210 11:16:37.317545 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b532b8-9d48-436c-acd3-85999347507d-catalog-content\") pod \"community-operators-pspsq\" (UID: \"83b532b8-9d48-436c-acd3-85999347507d\") " pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:37 crc kubenswrapper[4682]: I1210 11:16:37.317700 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b532b8-9d48-436c-acd3-85999347507d-utilities\") pod \"community-operators-pspsq\" (UID: \"83b532b8-9d48-436c-acd3-85999347507d\") " pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:37 crc kubenswrapper[4682]: I1210 11:16:37.336290 4682 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-4x729\" (UniqueName: \"kubernetes.io/projected/83b532b8-9d48-436c-acd3-85999347507d-kube-api-access-4x729\") pod \"community-operators-pspsq\" (UID: \"83b532b8-9d48-436c-acd3-85999347507d\") " pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:37 crc kubenswrapper[4682]: I1210 11:16:37.370329 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:37 crc kubenswrapper[4682]: I1210 11:16:37.865356 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pspsq"] Dec 10 11:16:38 crc kubenswrapper[4682]: I1210 11:16:38.043186 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pspsq" event={"ID":"83b532b8-9d48-436c-acd3-85999347507d","Type":"ContainerStarted","Data":"4e4a67398c8f11fcf2419b56265bee6d9f13ae41221400289a4b0f3b58d8ea01"} Dec 10 11:16:39 crc kubenswrapper[4682]: I1210 11:16:39.055123 4682 generic.go:334] "Generic (PLEG): container finished" podID="83b532b8-9d48-436c-acd3-85999347507d" containerID="40ebed827bad84b0cfe8c31fa5351e8cac8d950b7e604eb67e06672ce86a0c88" exitCode=0 Dec 10 11:16:39 crc kubenswrapper[4682]: I1210 11:16:39.055226 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pspsq" event={"ID":"83b532b8-9d48-436c-acd3-85999347507d","Type":"ContainerDied","Data":"40ebed827bad84b0cfe8c31fa5351e8cac8d950b7e604eb67e06672ce86a0c88"} Dec 10 11:16:40 crc kubenswrapper[4682]: I1210 11:16:40.069618 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pspsq" event={"ID":"83b532b8-9d48-436c-acd3-85999347507d","Type":"ContainerStarted","Data":"6fbd2abf0acdcb21804b1915eeaa4d048a0f082b421afb3ea43025132ad1f73e"} Dec 10 11:16:41 crc kubenswrapper[4682]: I1210 11:16:41.084191 4682 generic.go:334] "Generic (PLEG): container finished" podID="83b532b8-9d48-436c-acd3-85999347507d" containerID="6fbd2abf0acdcb21804b1915eeaa4d048a0f082b421afb3ea43025132ad1f73e" exitCode=0 Dec 10 11:16:41 crc kubenswrapper[4682]: I1210 11:16:41.084536 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pspsq" event={"ID":"83b532b8-9d48-436c-acd3-85999347507d","Type":"ContainerDied","Data":"6fbd2abf0acdcb21804b1915eeaa4d048a0f082b421afb3ea43025132ad1f73e"} Dec 10 11:16:41 crc kubenswrapper[4682]: E1210 11:16:41.393373 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:16:42 crc kubenswrapper[4682]: I1210 11:16:42.098249 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pspsq" event={"ID":"83b532b8-9d48-436c-acd3-85999347507d","Type":"ContainerStarted","Data":"dd07fcd5506021f86627ec496dfbfd26a136fab3258c18e01dd1b11932cbccf8"} Dec 10 11:16:42 crc kubenswrapper[4682]: I1210 11:16:42.117826 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pspsq" podStartSLOduration=2.652463637 podStartE2EDuration="5.117809591s" podCreationTimestamp="2025-12-10 11:16:37 +0000 UTC" firstStartedPulling="2025-12-10 
11:16:39.057042404 +0000 UTC m=+1879.377253164" lastFinishedPulling="2025-12-10 11:16:41.522388368 +0000 UTC m=+1881.842599118" observedRunningTime="2025-12-10 11:16:42.114804848 +0000 UTC m=+1882.435015618" watchObservedRunningTime="2025-12-10 11:16:42.117809591 +0000 UTC m=+1882.438020341" Dec 10 11:16:43 crc kubenswrapper[4682]: I1210 11:16:43.381877 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:16:43 crc kubenswrapper[4682]: E1210 11:16:43.382530 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:16:44 crc kubenswrapper[4682]: I1210 11:16:44.065905 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-ms2nv"] Dec 10 11:16:44 crc kubenswrapper[4682]: I1210 11:16:44.082892 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-ms2nv"] Dec 10 11:16:44 crc kubenswrapper[4682]: I1210 11:16:44.393188 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="327df6d2-4568-45e6-a719-650e8881d7cc" path="/var/lib/kubelet/pods/327df6d2-4568-45e6-a719-650e8881d7cc/volumes" Dec 10 11:16:46 crc kubenswrapper[4682]: E1210 11:16:46.383913 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:16:47 crc kubenswrapper[4682]: I1210 11:16:47.371049 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:47 crc kubenswrapper[4682]: I1210 11:16:47.371104 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:47 crc kubenswrapper[4682]: I1210 11:16:47.454127 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:48 crc kubenswrapper[4682]: I1210 11:16:48.270123 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:49 crc kubenswrapper[4682]: I1210 11:16:49.043297 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-bxlhz"] Dec 10 11:16:49 crc kubenswrapper[4682]: I1210 11:16:49.057515 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-bxlhz"] Dec 10 11:16:50 crc kubenswrapper[4682]: I1210 11:16:50.394602 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ca63023-1a06-43a7-b9e4-1235b76b8ec8" path="/var/lib/kubelet/pods/4ca63023-1a06-43a7-b9e4-1235b76b8ec8/volumes" Dec 10 11:16:51 crc kubenswrapper[4682]: I1210 11:16:51.819675 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pspsq"] Dec 10 11:16:51 crc kubenswrapper[4682]: I1210 11:16:51.820151 4682 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openshift-marketplace/community-operators-pspsq" podUID="83b532b8-9d48-436c-acd3-85999347507d" containerName="registry-server" containerID="cri-o://dd07fcd5506021f86627ec496dfbfd26a136fab3258c18e01dd1b11932cbccf8" gracePeriod=2 Dec 10 11:16:52 crc kubenswrapper[4682]: I1210 11:16:52.259423 4682 generic.go:334] "Generic (PLEG): container finished" podID="83b532b8-9d48-436c-acd3-85999347507d" containerID="dd07fcd5506021f86627ec496dfbfd26a136fab3258c18e01dd1b11932cbccf8" exitCode=0 Dec 10 11:16:52 crc kubenswrapper[4682]: I1210 11:16:52.259613 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pspsq" event={"ID":"83b532b8-9d48-436c-acd3-85999347507d","Type":"ContainerDied","Data":"dd07fcd5506021f86627ec496dfbfd26a136fab3258c18e01dd1b11932cbccf8"} Dec 10 11:16:52 crc kubenswrapper[4682]: I1210 11:16:52.259684 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pspsq" event={"ID":"83b532b8-9d48-436c-acd3-85999347507d","Type":"ContainerDied","Data":"4e4a67398c8f11fcf2419b56265bee6d9f13ae41221400289a4b0f3b58d8ea01"} Dec 10 11:16:52 crc kubenswrapper[4682]: I1210 11:16:52.259706 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e4a67398c8f11fcf2419b56265bee6d9f13ae41221400289a4b0f3b58d8ea01" Dec 10 11:16:52 crc kubenswrapper[4682]: I1210 11:16:52.297203 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:52 crc kubenswrapper[4682]: I1210 11:16:52.377365 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4x729\" (UniqueName: \"kubernetes.io/projected/83b532b8-9d48-436c-acd3-85999347507d-kube-api-access-4x729\") pod \"83b532b8-9d48-436c-acd3-85999347507d\" (UID: \"83b532b8-9d48-436c-acd3-85999347507d\") " Dec 10 11:16:52 crc kubenswrapper[4682]: I1210 11:16:52.377563 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b532b8-9d48-436c-acd3-85999347507d-utilities\") pod \"83b532b8-9d48-436c-acd3-85999347507d\" (UID: \"83b532b8-9d48-436c-acd3-85999347507d\") " Dec 10 11:16:52 crc kubenswrapper[4682]: I1210 11:16:52.377608 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b532b8-9d48-436c-acd3-85999347507d-catalog-content\") pod \"83b532b8-9d48-436c-acd3-85999347507d\" (UID: \"83b532b8-9d48-436c-acd3-85999347507d\") " Dec 10 11:16:52 crc kubenswrapper[4682]: I1210 11:16:52.378766 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83b532b8-9d48-436c-acd3-85999347507d-utilities" (OuterVolumeSpecName: "utilities") pod "83b532b8-9d48-436c-acd3-85999347507d" (UID: "83b532b8-9d48-436c-acd3-85999347507d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:16:52 crc kubenswrapper[4682]: I1210 11:16:52.383716 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83b532b8-9d48-436c-acd3-85999347507d-kube-api-access-4x729" (OuterVolumeSpecName: "kube-api-access-4x729") pod "83b532b8-9d48-436c-acd3-85999347507d" (UID: "83b532b8-9d48-436c-acd3-85999347507d"). InnerVolumeSpecName "kube-api-access-4x729". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:16:52 crc kubenswrapper[4682]: I1210 11:16:52.425171 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83b532b8-9d48-436c-acd3-85999347507d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "83b532b8-9d48-436c-acd3-85999347507d" (UID: "83b532b8-9d48-436c-acd3-85999347507d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:16:52 crc kubenswrapper[4682]: I1210 11:16:52.479332 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4x729\" (UniqueName: \"kubernetes.io/projected/83b532b8-9d48-436c-acd3-85999347507d-kube-api-access-4x729\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:52 crc kubenswrapper[4682]: I1210 11:16:52.479565 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b532b8-9d48-436c-acd3-85999347507d-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:52 crc kubenswrapper[4682]: I1210 11:16:52.479654 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b532b8-9d48-436c-acd3-85999347507d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:16:53 crc kubenswrapper[4682]: I1210 11:16:53.273871 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pspsq" Dec 10 11:16:53 crc kubenswrapper[4682]: I1210 11:16:53.313526 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pspsq"] Dec 10 11:16:53 crc kubenswrapper[4682]: I1210 11:16:53.321649 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pspsq"] Dec 10 11:16:53 crc kubenswrapper[4682]: E1210 11:16:53.478135 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:16:53 crc kubenswrapper[4682]: E1210 11:16:53.478200 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:16:53 crc kubenswrapper[4682]: E1210 11:16:53.478346 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:16:53 crc kubenswrapper[4682]: E1210 11:16:53.479747 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:16:54 crc kubenswrapper[4682]: I1210 11:16:54.399585 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83b532b8-9d48-436c-acd3-85999347507d" path="/var/lib/kubelet/pods/83b532b8-9d48-436c-acd3-85999347507d/volumes" Dec 10 11:16:57 crc kubenswrapper[4682]: I1210 11:16:57.381617 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:16:57 crc kubenswrapper[4682]: E1210 11:16:57.382327 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:16:57 crc kubenswrapper[4682]: E1210 11:16:57.514276 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:16:57 crc kubenswrapper[4682]: E1210 11:16:57.514336 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:16:57 crc kubenswrapper[4682]: E1210 11:16:57.514451 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:16:57 crc kubenswrapper[4682]: E1210 11:16:57.515645 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:17:05 crc kubenswrapper[4682]: E1210 11:17:05.383004 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:17:08 crc kubenswrapper[4682]: I1210 11:17:08.381698 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:17:08 crc kubenswrapper[4682]: E1210 11:17:08.384330 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:17:09 crc kubenswrapper[4682]: I1210 11:17:09.470158 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"09157c81d5b2d322b1ec981283a8a88601f69c18b66aa9a19af7086b9a080694"} Dec 10 11:17:13 crc kubenswrapper[4682]: I1210 11:17:13.042990 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-tx82q"] Dec 10 11:17:13 crc kubenswrapper[4682]: I1210 11:17:13.065836 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-tx82q"] Dec 10 11:17:14 crc kubenswrapper[4682]: I1210 11:17:14.397455 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9208fd9-c069-4f27-868c-e248ef7970c0" path="/var/lib/kubelet/pods/b9208fd9-c069-4f27-868c-e248ef7970c0/volumes" Dec 10 11:17:17 crc kubenswrapper[4682]: E1210 11:17:17.383112 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:17:23 crc kubenswrapper[4682]: E1210 11:17:23.387758 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:17:28 crc kubenswrapper[4682]: E1210 11:17:28.384656 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:17:33 crc kubenswrapper[4682]: I1210 11:17:33.077308 4682 scope.go:117] "RemoveContainer" containerID="20bd3b52de812a92adcb4ad276a9c0c51f28a4add7f8faba5813bee064947674" Dec 10 11:17:33 crc kubenswrapper[4682]: I1210 11:17:33.128233 4682 scope.go:117] "RemoveContainer" 
containerID="d0aabb3b100c7ef3730f7cadcb298843d3f9108639a542c744a1b631f934f1c9" Dec 10 11:17:33 crc kubenswrapper[4682]: I1210 11:17:33.190163 4682 scope.go:117] "RemoveContainer" containerID="1171262eb5fd830e2362004bf67fd313cacc92b677df7bec7f101e53b6967961" Dec 10 11:17:37 crc kubenswrapper[4682]: I1210 11:17:37.048169 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-6g6l7"] Dec 10 11:17:37 crc kubenswrapper[4682]: I1210 11:17:37.058250 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-ppzrh"] Dec 10 11:17:37 crc kubenswrapper[4682]: I1210 11:17:37.069566 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-h5fss"] Dec 10 11:17:37 crc kubenswrapper[4682]: I1210 11:17:37.077621 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-h5fss"] Dec 10 11:17:37 crc kubenswrapper[4682]: I1210 11:17:37.086167 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-ppzrh"] Dec 10 11:17:37 crc kubenswrapper[4682]: I1210 11:17:37.094299 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-6g6l7"] Dec 10 11:17:38 crc kubenswrapper[4682]: E1210 11:17:38.384777 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:17:38 crc kubenswrapper[4682]: I1210 11:17:38.406003 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2" path="/var/lib/kubelet/pods/3c6b21b2-6d5f-4cc0-a3af-f3cbb98067b2/volumes" Dec 10 11:17:38 crc kubenswrapper[4682]: I1210 11:17:38.406985 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54" path="/var/lib/kubelet/pods/8acf5b80-d6a3-45d8-9eb1-ffbd7fc4bf54/volumes" Dec 10 11:17:38 crc kubenswrapper[4682]: I1210 11:17:38.408017 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6258156-0c39-4f7b-a367-954f1eb68718" path="/var/lib/kubelet/pods/c6258156-0c39-4f7b-a367-954f1eb68718/volumes" Dec 10 11:17:39 crc kubenswrapper[4682]: I1210 11:17:39.063848 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-8h57v"] Dec 10 11:17:39 crc kubenswrapper[4682]: I1210 11:17:39.075177 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-8h57v"] Dec 10 11:17:40 crc kubenswrapper[4682]: I1210 11:17:40.393466 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="382d9ec8-5a3b-47b3-a301-955c7e2a4ecb" path="/var/lib/kubelet/pods/382d9ec8-5a3b-47b3-a301-955c7e2a4ecb/volumes" Dec 10 11:17:41 crc kubenswrapper[4682]: E1210 11:17:41.383415 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:17:49 crc kubenswrapper[4682]: E1210 11:17:49.383914 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:17:54 crc kubenswrapper[4682]: E1210 11:17:54.385232 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:18:00 crc kubenswrapper[4682]: E1210 11:18:00.390434 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:18:04 crc kubenswrapper[4682]: I1210 11:18:04.059155 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cloudkitty-storageinit-bfwj8"] Dec 10 11:18:04 crc kubenswrapper[4682]: I1210 11:18:04.077375 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cloudkitty-storageinit-bfwj8"] Dec 10 11:18:04 crc kubenswrapper[4682]: I1210 11:18:04.453385 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33627f7b-af0a-495f-b5cb-ed10c47ed17d" path="/var/lib/kubelet/pods/33627f7b-af0a-495f-b5cb-ed10c47ed17d/volumes" Dec 10 11:18:07 crc kubenswrapper[4682]: E1210 11:18:07.383606 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:18:12 crc kubenswrapper[4682]: E1210 11:18:12.383021 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:18:21 crc kubenswrapper[4682]: E1210 11:18:21.384071 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:18:26 crc kubenswrapper[4682]: E1210 11:18:26.384043 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:18:33 crc kubenswrapper[4682]: I1210 11:18:33.320964 4682 scope.go:117] "RemoveContainer" containerID="d44450d7db3b3f08590d80b491c4dc6dc1ef11723c687adada13904ae092f553" Dec 10 11:18:33 crc kubenswrapper[4682]: I1210 11:18:33.387391 4682 scope.go:117] "RemoveContainer" 
containerID="6763563e2a4caa280c3dfa595fb0f443b1e77703b9630b707d072babeac76023" Dec 10 11:18:33 crc kubenswrapper[4682]: I1210 11:18:33.436841 4682 scope.go:117] "RemoveContainer" containerID="e73667f467359e8c2f47d2b5b24aea120bcdc4334e2d094517c6b2c67b3d7f91" Dec 10 11:18:33 crc kubenswrapper[4682]: I1210 11:18:33.485963 4682 scope.go:117] "RemoveContainer" containerID="08dcaf058345a8eba6ed267cc166bcb549b4f3967e92d25302b179639f32454f" Dec 10 11:18:33 crc kubenswrapper[4682]: I1210 11:18:33.539251 4682 scope.go:117] "RemoveContainer" containerID="b0541aa7f9c23fb6eef1ef55937b6fa7738e2170c6eec45e4c19bbd20ca954da" Dec 10 11:18:34 crc kubenswrapper[4682]: E1210 11:18:34.383948 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:18:35 crc kubenswrapper[4682]: I1210 11:18:35.041664 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-f97b-account-create-update-97hvk"] Dec 10 11:18:35 crc kubenswrapper[4682]: I1210 11:18:35.053072 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-f97b-account-create-update-97hvk"] Dec 10 11:18:36 crc kubenswrapper[4682]: I1210 11:18:36.036862 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-7081-account-create-update-tgr2n"] Dec 10 11:18:36 crc kubenswrapper[4682]: I1210 11:18:36.051291 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-7081-account-create-update-tgr2n"] Dec 10 11:18:36 crc kubenswrapper[4682]: I1210 11:18:36.392106 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="695c9c33-02b4-4ba2-86d6-a6def1e67513" path="/var/lib/kubelet/pods/695c9c33-02b4-4ba2-86d6-a6def1e67513/volumes" Dec 10 11:18:36 crc kubenswrapper[4682]: I1210 11:18:36.392768 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e060569d-f156-4f2e-9796-e304b2d2be0d" path="/var/lib/kubelet/pods/e060569d-f156-4f2e-9796-e304b2d2be0d/volumes" Dec 10 11:18:37 crc kubenswrapper[4682]: I1210 11:18:37.039938 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-dtzdg"] Dec 10 11:18:37 crc kubenswrapper[4682]: I1210 11:18:37.050543 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-dfvhs"] Dec 10 11:18:37 crc kubenswrapper[4682]: I1210 11:18:37.062285 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-xkg9x"] Dec 10 11:18:37 crc kubenswrapper[4682]: I1210 11:18:37.071153 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-1f8c-account-create-update-lr7tf"] Dec 10 11:18:37 crc kubenswrapper[4682]: I1210 11:18:37.078954 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-dtzdg"] Dec 10 11:18:37 crc kubenswrapper[4682]: I1210 11:18:37.090413 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-xkg9x"] Dec 10 11:18:37 crc kubenswrapper[4682]: I1210 11:18:37.100896 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-1f8c-account-create-update-lr7tf"] Dec 10 11:18:37 crc kubenswrapper[4682]: I1210 11:18:37.111480 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/nova-cell1-db-create-dfvhs"] Dec 10 11:18:38 crc kubenswrapper[4682]: I1210 11:18:38.392726 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19e9c14e-416c-4f11-96ff-d2ccdac04cdf" path="/var/lib/kubelet/pods/19e9c14e-416c-4f11-96ff-d2ccdac04cdf/volumes" Dec 10 11:18:38 crc kubenswrapper[4682]: I1210 11:18:38.393679 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3adb294d-5fbd-4f36-b324-8e99e2e22cee" path="/var/lib/kubelet/pods/3adb294d-5fbd-4f36-b324-8e99e2e22cee/volumes" Dec 10 11:18:38 crc kubenswrapper[4682]: I1210 11:18:38.394271 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="636ce24d-c743-4ca9-b253-8c5da3d9f7c8" path="/var/lib/kubelet/pods/636ce24d-c743-4ca9-b253-8c5da3d9f7c8/volumes" Dec 10 11:18:38 crc kubenswrapper[4682]: I1210 11:18:38.394904 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4" path="/var/lib/kubelet/pods/6c8ebd3b-4d1e-45b3-92b3-577fe7d64dd4/volumes" Dec 10 11:18:41 crc kubenswrapper[4682]: E1210 11:18:41.385368 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:18:49 crc kubenswrapper[4682]: E1210 11:18:49.384163 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:18:53 crc kubenswrapper[4682]: E1210 11:18:53.383681 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:19:04 crc kubenswrapper[4682]: E1210 11:19:04.382626 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:19:04 crc kubenswrapper[4682]: E1210 11:19:04.384491 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.144096 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wdrpf"] Dec 10 11:19:12 crc kubenswrapper[4682]: E1210 11:19:12.145155 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b532b8-9d48-436c-acd3-85999347507d" containerName="registry-server" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.145170 4682 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="83b532b8-9d48-436c-acd3-85999347507d" containerName="registry-server" Dec 10 11:19:12 crc kubenswrapper[4682]: E1210 11:19:12.145179 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b532b8-9d48-436c-acd3-85999347507d" containerName="extract-utilities" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.145186 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="83b532b8-9d48-436c-acd3-85999347507d" containerName="extract-utilities" Dec 10 11:19:12 crc kubenswrapper[4682]: E1210 11:19:12.145203 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b532b8-9d48-436c-acd3-85999347507d" containerName="extract-content" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.145209 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="83b532b8-9d48-436c-acd3-85999347507d" containerName="extract-content" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.145504 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="83b532b8-9d48-436c-acd3-85999347507d" containerName="registry-server" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.159335 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.174610 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wdrpf"] Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.226762 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7g64\" (UniqueName: \"kubernetes.io/projected/6aaa6112-1a85-48d7-9551-c7b8d02d06db-kube-api-access-l7g64\") pod \"redhat-operators-wdrpf\" (UID: \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\") " pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.227010 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aaa6112-1a85-48d7-9551-c7b8d02d06db-catalog-content\") pod \"redhat-operators-wdrpf\" (UID: \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\") " pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.227249 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aaa6112-1a85-48d7-9551-c7b8d02d06db-utilities\") pod \"redhat-operators-wdrpf\" (UID: \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\") " pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.329901 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aaa6112-1a85-48d7-9551-c7b8d02d06db-catalog-content\") pod \"redhat-operators-wdrpf\" (UID: \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\") " pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.330129 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aaa6112-1a85-48d7-9551-c7b8d02d06db-utilities\") pod \"redhat-operators-wdrpf\" (UID: \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\") " pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.330233 4682 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7g64\" (UniqueName: \"kubernetes.io/projected/6aaa6112-1a85-48d7-9551-c7b8d02d06db-kube-api-access-l7g64\") pod \"redhat-operators-wdrpf\" (UID: \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\") " pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.330441 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aaa6112-1a85-48d7-9551-c7b8d02d06db-catalog-content\") pod \"redhat-operators-wdrpf\" (UID: \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\") " pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.330667 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aaa6112-1a85-48d7-9551-c7b8d02d06db-utilities\") pod \"redhat-operators-wdrpf\" (UID: \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\") " pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.356828 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7g64\" (UniqueName: \"kubernetes.io/projected/6aaa6112-1a85-48d7-9551-c7b8d02d06db-kube-api-access-l7g64\") pod \"redhat-operators-wdrpf\" (UID: \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\") " pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.494887 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:12 crc kubenswrapper[4682]: I1210 11:19:12.968903 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wdrpf"] Dec 10 11:19:13 crc kubenswrapper[4682]: I1210 11:19:13.919816 4682 generic.go:334] "Generic (PLEG): container finished" podID="6aaa6112-1a85-48d7-9551-c7b8d02d06db" containerID="df1afa65a3e289a474fc9672e9ac042d852d7e82c91ce278a473e63e112aafb5" exitCode=0 Dec 10 11:19:13 crc kubenswrapper[4682]: I1210 11:19:13.919940 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdrpf" event={"ID":"6aaa6112-1a85-48d7-9551-c7b8d02d06db","Type":"ContainerDied","Data":"df1afa65a3e289a474fc9672e9ac042d852d7e82c91ce278a473e63e112aafb5"} Dec 10 11:19:13 crc kubenswrapper[4682]: I1210 11:19:13.920367 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdrpf" event={"ID":"6aaa6112-1a85-48d7-9551-c7b8d02d06db","Type":"ContainerStarted","Data":"7a316b7be39f33fe47a29b01a3ecd7a00270cf13b9fa6f35808ebcee31647c0b"} Dec 10 11:19:13 crc kubenswrapper[4682]: I1210 11:19:13.934760 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:19:15 crc kubenswrapper[4682]: E1210 11:19:15.392921 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:19:15 crc kubenswrapper[4682]: I1210 11:19:15.948195 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdrpf" 
event={"ID":"6aaa6112-1a85-48d7-9551-c7b8d02d06db","Type":"ContainerStarted","Data":"6ab177f9835a226fbce2ebcf7e5752c0ee5525252cadfb7dcef749feb23ae234"} Dec 10 11:19:19 crc kubenswrapper[4682]: E1210 11:19:19.383849 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:19:19 crc kubenswrapper[4682]: I1210 11:19:19.991695 4682 generic.go:334] "Generic (PLEG): container finished" podID="6aaa6112-1a85-48d7-9551-c7b8d02d06db" containerID="6ab177f9835a226fbce2ebcf7e5752c0ee5525252cadfb7dcef749feb23ae234" exitCode=0 Dec 10 11:19:19 crc kubenswrapper[4682]: I1210 11:19:19.991779 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdrpf" event={"ID":"6aaa6112-1a85-48d7-9551-c7b8d02d06db","Type":"ContainerDied","Data":"6ab177f9835a226fbce2ebcf7e5752c0ee5525252cadfb7dcef749feb23ae234"} Dec 10 11:19:21 crc kubenswrapper[4682]: I1210 11:19:21.002421 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdrpf" event={"ID":"6aaa6112-1a85-48d7-9551-c7b8d02d06db","Type":"ContainerStarted","Data":"1e5e1ce73ec486eec539db992a99d82ad8cd95f85ee48b5eb68f6ad45f6a42ee"} Dec 10 11:19:21 crc kubenswrapper[4682]: I1210 11:19:21.027238 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wdrpf" podStartSLOduration=2.530818801 podStartE2EDuration="9.027205254s" podCreationTimestamp="2025-12-10 11:19:12 +0000 UTC" firstStartedPulling="2025-12-10 11:19:13.934455771 +0000 UTC m=+2034.254666531" lastFinishedPulling="2025-12-10 11:19:20.430842224 +0000 UTC m=+2040.751052984" observedRunningTime="2025-12-10 11:19:21.018568162 +0000 UTC m=+2041.338778932" watchObservedRunningTime="2025-12-10 11:19:21.027205254 +0000 UTC m=+2041.347416004" Dec 10 11:19:22 crc kubenswrapper[4682]: I1210 11:19:22.496354 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:22 crc kubenswrapper[4682]: I1210 11:19:22.496716 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:23 crc kubenswrapper[4682]: I1210 11:19:23.582715 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wdrpf" podUID="6aaa6112-1a85-48d7-9551-c7b8d02d06db" containerName="registry-server" probeResult="failure" output=< Dec 10 11:19:23 crc kubenswrapper[4682]: timeout: failed to connect service ":50051" within 1s Dec 10 11:19:23 crc kubenswrapper[4682]: > Dec 10 11:19:25 crc kubenswrapper[4682]: I1210 11:19:25.052595 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dthrv"] Dec 10 11:19:25 crc kubenswrapper[4682]: I1210 11:19:25.064296 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dthrv"] Dec 10 11:19:26 crc kubenswrapper[4682]: I1210 11:19:26.395048 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55ad637c-32c9-421a-a8b7-ffe9cc9eebdc" path="/var/lib/kubelet/pods/55ad637c-32c9-421a-a8b7-ffe9cc9eebdc/volumes" Dec 10 11:19:29 crc kubenswrapper[4682]: E1210 11:19:29.383190 4682 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:19:30 crc kubenswrapper[4682]: E1210 11:19:30.394027 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:19:32 crc kubenswrapper[4682]: I1210 11:19:32.590843 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:32 crc kubenswrapper[4682]: I1210 11:19:32.653238 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:32 crc kubenswrapper[4682]: I1210 11:19:32.930578 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wdrpf"] Dec 10 11:19:33 crc kubenswrapper[4682]: I1210 11:19:33.673891 4682 scope.go:117] "RemoveContainer" containerID="6054d3c423fb5a8fd1aba35c17d8c5de6ad5f45ec02ff5058eeb5e4bcebcb7dc" Dec 10 11:19:33 crc kubenswrapper[4682]: I1210 11:19:33.711758 4682 scope.go:117] "RemoveContainer" containerID="79ef7bdfd497ae2ccedaab4746d45f88e415142a4c8d35d3d46f898712f41ae8" Dec 10 11:19:33 crc kubenswrapper[4682]: I1210 11:19:33.776626 4682 scope.go:117] "RemoveContainer" containerID="89acc5342582652ee325e0bd473ede0c367f8d632a5988e4bd2ca363b38195c4" Dec 10 11:19:33 crc kubenswrapper[4682]: I1210 11:19:33.815815 4682 scope.go:117] "RemoveContainer" containerID="f0121a462490bc1118c76bc208b7eef1a591d0918971ab3b1ecfcd2e70fa24b5" Dec 10 11:19:33 crc kubenswrapper[4682]: I1210 11:19:33.877597 4682 scope.go:117] "RemoveContainer" containerID="cfacf6d7fa9e8a9e4fb96f62f3e5663cd2958841cca20f01714826dc88e85369" Dec 10 11:19:33 crc kubenswrapper[4682]: I1210 11:19:33.927684 4682 scope.go:117] "RemoveContainer" containerID="c7977e71889f282b1f68b73051bd112c808ba3f97eb860dcc06afbb49a124e58" Dec 10 11:19:34 crc kubenswrapper[4682]: I1210 11:19:34.012036 4682 scope.go:117] "RemoveContainer" containerID="0baca866fff02b7e3ff6f983ed1e14551917a80f3170f4f93ab853288c0d194c" Dec 10 11:19:34 crc kubenswrapper[4682]: I1210 11:19:34.152899 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wdrpf" podUID="6aaa6112-1a85-48d7-9551-c7b8d02d06db" containerName="registry-server" containerID="cri-o://1e5e1ce73ec486eec539db992a99d82ad8cd95f85ee48b5eb68f6ad45f6a42ee" gracePeriod=2 Dec 10 11:19:34 crc kubenswrapper[4682]: I1210 11:19:34.603176 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:34 crc kubenswrapper[4682]: I1210 11:19:34.787644 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aaa6112-1a85-48d7-9551-c7b8d02d06db-catalog-content\") pod \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\" (UID: \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\") " Dec 10 11:19:34 crc kubenswrapper[4682]: I1210 11:19:34.787709 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7g64\" (UniqueName: \"kubernetes.io/projected/6aaa6112-1a85-48d7-9551-c7b8d02d06db-kube-api-access-l7g64\") pod \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\" (UID: \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\") " Dec 10 11:19:34 crc kubenswrapper[4682]: I1210 11:19:34.787804 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aaa6112-1a85-48d7-9551-c7b8d02d06db-utilities\") pod \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\" (UID: \"6aaa6112-1a85-48d7-9551-c7b8d02d06db\") " Dec 10 11:19:34 crc kubenswrapper[4682]: I1210 11:19:34.788803 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6aaa6112-1a85-48d7-9551-c7b8d02d06db-utilities" (OuterVolumeSpecName: "utilities") pod "6aaa6112-1a85-48d7-9551-c7b8d02d06db" (UID: "6aaa6112-1a85-48d7-9551-c7b8d02d06db"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:19:34 crc kubenswrapper[4682]: I1210 11:19:34.795390 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6aaa6112-1a85-48d7-9551-c7b8d02d06db-kube-api-access-l7g64" (OuterVolumeSpecName: "kube-api-access-l7g64") pod "6aaa6112-1a85-48d7-9551-c7b8d02d06db" (UID: "6aaa6112-1a85-48d7-9551-c7b8d02d06db"). InnerVolumeSpecName "kube-api-access-l7g64". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:19:34 crc kubenswrapper[4682]: I1210 11:19:34.890879 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7g64\" (UniqueName: \"kubernetes.io/projected/6aaa6112-1a85-48d7-9551-c7b8d02d06db-kube-api-access-l7g64\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:34 crc kubenswrapper[4682]: I1210 11:19:34.890921 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aaa6112-1a85-48d7-9551-c7b8d02d06db-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:34 crc kubenswrapper[4682]: I1210 11:19:34.908818 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6aaa6112-1a85-48d7-9551-c7b8d02d06db-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6aaa6112-1a85-48d7-9551-c7b8d02d06db" (UID: "6aaa6112-1a85-48d7-9551-c7b8d02d06db"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:19:34 crc kubenswrapper[4682]: I1210 11:19:34.992976 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aaa6112-1a85-48d7-9551-c7b8d02d06db-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:19:35 crc kubenswrapper[4682]: I1210 11:19:35.166872 4682 generic.go:334] "Generic (PLEG): container finished" podID="6aaa6112-1a85-48d7-9551-c7b8d02d06db" containerID="1e5e1ce73ec486eec539db992a99d82ad8cd95f85ee48b5eb68f6ad45f6a42ee" exitCode=0 Dec 10 11:19:35 crc kubenswrapper[4682]: I1210 11:19:35.166917 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdrpf" event={"ID":"6aaa6112-1a85-48d7-9551-c7b8d02d06db","Type":"ContainerDied","Data":"1e5e1ce73ec486eec539db992a99d82ad8cd95f85ee48b5eb68f6ad45f6a42ee"} Dec 10 11:19:35 crc kubenswrapper[4682]: I1210 11:19:35.166930 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wdrpf" Dec 10 11:19:35 crc kubenswrapper[4682]: I1210 11:19:35.166956 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdrpf" event={"ID":"6aaa6112-1a85-48d7-9551-c7b8d02d06db","Type":"ContainerDied","Data":"7a316b7be39f33fe47a29b01a3ecd7a00270cf13b9fa6f35808ebcee31647c0b"} Dec 10 11:19:35 crc kubenswrapper[4682]: I1210 11:19:35.166977 4682 scope.go:117] "RemoveContainer" containerID="1e5e1ce73ec486eec539db992a99d82ad8cd95f85ee48b5eb68f6ad45f6a42ee" Dec 10 11:19:35 crc kubenswrapper[4682]: I1210 11:19:35.197657 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wdrpf"] Dec 10 11:19:35 crc kubenswrapper[4682]: I1210 11:19:35.202090 4682 scope.go:117] "RemoveContainer" containerID="6ab177f9835a226fbce2ebcf7e5752c0ee5525252cadfb7dcef749feb23ae234" Dec 10 11:19:35 crc kubenswrapper[4682]: I1210 11:19:35.206295 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wdrpf"] Dec 10 11:19:35 crc kubenswrapper[4682]: I1210 11:19:35.226659 4682 scope.go:117] "RemoveContainer" containerID="df1afa65a3e289a474fc9672e9ac042d852d7e82c91ce278a473e63e112aafb5" Dec 10 11:19:35 crc kubenswrapper[4682]: I1210 11:19:35.275338 4682 scope.go:117] "RemoveContainer" containerID="1e5e1ce73ec486eec539db992a99d82ad8cd95f85ee48b5eb68f6ad45f6a42ee" Dec 10 11:19:35 crc kubenswrapper[4682]: E1210 11:19:35.275849 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e5e1ce73ec486eec539db992a99d82ad8cd95f85ee48b5eb68f6ad45f6a42ee\": container with ID starting with 1e5e1ce73ec486eec539db992a99d82ad8cd95f85ee48b5eb68f6ad45f6a42ee not found: ID does not exist" containerID="1e5e1ce73ec486eec539db992a99d82ad8cd95f85ee48b5eb68f6ad45f6a42ee" Dec 10 11:19:35 crc kubenswrapper[4682]: I1210 11:19:35.275904 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e5e1ce73ec486eec539db992a99d82ad8cd95f85ee48b5eb68f6ad45f6a42ee"} err="failed to get container status \"1e5e1ce73ec486eec539db992a99d82ad8cd95f85ee48b5eb68f6ad45f6a42ee\": rpc error: code = NotFound desc = could not find container \"1e5e1ce73ec486eec539db992a99d82ad8cd95f85ee48b5eb68f6ad45f6a42ee\": container with ID starting with 1e5e1ce73ec486eec539db992a99d82ad8cd95f85ee48b5eb68f6ad45f6a42ee not found: ID does not exist" Dec 10 11:19:35 crc 
kubenswrapper[4682]: I1210 11:19:35.275936 4682 scope.go:117] "RemoveContainer" containerID="6ab177f9835a226fbce2ebcf7e5752c0ee5525252cadfb7dcef749feb23ae234" Dec 10 11:19:35 crc kubenswrapper[4682]: E1210 11:19:35.276217 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ab177f9835a226fbce2ebcf7e5752c0ee5525252cadfb7dcef749feb23ae234\": container with ID starting with 6ab177f9835a226fbce2ebcf7e5752c0ee5525252cadfb7dcef749feb23ae234 not found: ID does not exist" containerID="6ab177f9835a226fbce2ebcf7e5752c0ee5525252cadfb7dcef749feb23ae234" Dec 10 11:19:35 crc kubenswrapper[4682]: I1210 11:19:35.276248 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ab177f9835a226fbce2ebcf7e5752c0ee5525252cadfb7dcef749feb23ae234"} err="failed to get container status \"6ab177f9835a226fbce2ebcf7e5752c0ee5525252cadfb7dcef749feb23ae234\": rpc error: code = NotFound desc = could not find container \"6ab177f9835a226fbce2ebcf7e5752c0ee5525252cadfb7dcef749feb23ae234\": container with ID starting with 6ab177f9835a226fbce2ebcf7e5752c0ee5525252cadfb7dcef749feb23ae234 not found: ID does not exist" Dec 10 11:19:35 crc kubenswrapper[4682]: I1210 11:19:35.276272 4682 scope.go:117] "RemoveContainer" containerID="df1afa65a3e289a474fc9672e9ac042d852d7e82c91ce278a473e63e112aafb5" Dec 10 11:19:35 crc kubenswrapper[4682]: E1210 11:19:35.276523 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df1afa65a3e289a474fc9672e9ac042d852d7e82c91ce278a473e63e112aafb5\": container with ID starting with df1afa65a3e289a474fc9672e9ac042d852d7e82c91ce278a473e63e112aafb5 not found: ID does not exist" containerID="df1afa65a3e289a474fc9672e9ac042d852d7e82c91ce278a473e63e112aafb5" Dec 10 11:19:35 crc kubenswrapper[4682]: I1210 11:19:35.276561 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df1afa65a3e289a474fc9672e9ac042d852d7e82c91ce278a473e63e112aafb5"} err="failed to get container status \"df1afa65a3e289a474fc9672e9ac042d852d7e82c91ce278a473e63e112aafb5\": rpc error: code = NotFound desc = could not find container \"df1afa65a3e289a474fc9672e9ac042d852d7e82c91ce278a473e63e112aafb5\": container with ID starting with df1afa65a3e289a474fc9672e9ac042d852d7e82c91ce278a473e63e112aafb5 not found: ID does not exist" Dec 10 11:19:36 crc kubenswrapper[4682]: I1210 11:19:36.403416 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6aaa6112-1a85-48d7-9551-c7b8d02d06db" path="/var/lib/kubelet/pods/6aaa6112-1a85-48d7-9551-c7b8d02d06db/volumes" Dec 10 11:19:36 crc kubenswrapper[4682]: I1210 11:19:36.478237 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:19:36 crc kubenswrapper[4682]: I1210 11:19:36.478326 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:19:42 crc kubenswrapper[4682]: E1210 11:19:42.383664 4682 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:19:43 crc kubenswrapper[4682]: E1210 11:19:43.383326 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:19:48 crc kubenswrapper[4682]: I1210 11:19:48.050169 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-2cfhl"] Dec 10 11:19:48 crc kubenswrapper[4682]: I1210 11:19:48.061701 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-2cfhl"] Dec 10 11:19:48 crc kubenswrapper[4682]: I1210 11:19:48.399849 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b73064f-224e-4f76-9c6b-dba2d1f1dbd7" path="/var/lib/kubelet/pods/8b73064f-224e-4f76-9c6b-dba2d1f1dbd7/volumes" Dec 10 11:19:51 crc kubenswrapper[4682]: I1210 11:19:51.046446 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-277zt"] Dec 10 11:19:51 crc kubenswrapper[4682]: I1210 11:19:51.069626 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-277zt"] Dec 10 11:19:52 crc kubenswrapper[4682]: I1210 11:19:52.392314 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3cd9ca1-7529-4458-a470-d3dfeed6ad9e" path="/var/lib/kubelet/pods/d3cd9ca1-7529-4458-a470-d3dfeed6ad9e/volumes" Dec 10 11:19:54 crc kubenswrapper[4682]: E1210 11:19:54.388086 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:19:55 crc kubenswrapper[4682]: E1210 11:19:55.382728 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:20:06 crc kubenswrapper[4682]: E1210 11:20:06.383823 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:20:06 crc kubenswrapper[4682]: I1210 11:20:06.478730 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:20:06 crc kubenswrapper[4682]: I1210 11:20:06.478797 4682 prober.go:107] 
"Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:20:09 crc kubenswrapper[4682]: E1210 11:20:09.383460 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:20:19 crc kubenswrapper[4682]: E1210 11:20:19.385963 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:20:22 crc kubenswrapper[4682]: E1210 11:20:22.383414 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:20:33 crc kubenswrapper[4682]: I1210 11:20:33.056670 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-bnl68"] Dec 10 11:20:33 crc kubenswrapper[4682]: I1210 11:20:33.073956 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-bnl68"] Dec 10 11:20:34 crc kubenswrapper[4682]: I1210 11:20:34.157656 4682 scope.go:117] "RemoveContainer" containerID="cdaaa0ff2203e7460452dbc1f9d6f7b93260a55ab8cccfbddc723ead84855ad5" Dec 10 11:20:34 crc kubenswrapper[4682]: I1210 11:20:34.234657 4682 scope.go:117] "RemoveContainer" containerID="daaae517be80afb8858259496e97ff8508ff92f67c729c44b9fad5def22ed979" Dec 10 11:20:34 crc kubenswrapper[4682]: E1210 11:20:34.382494 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:20:34 crc kubenswrapper[4682]: I1210 11:20:34.395418 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34d52064-5f24-4cc9-ad72-c04f77d892bf" path="/var/lib/kubelet/pods/34d52064-5f24-4cc9-ad72-c04f77d892bf/volumes" Dec 10 11:20:36 crc kubenswrapper[4682]: E1210 11:20:36.383051 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:20:36 crc kubenswrapper[4682]: I1210 11:20:36.479049 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:20:36 crc kubenswrapper[4682]: I1210 11:20:36.479147 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:20:36 crc kubenswrapper[4682]: I1210 11:20:36.479234 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 11:20:36 crc kubenswrapper[4682]: I1210 11:20:36.480459 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"09157c81d5b2d322b1ec981283a8a88601f69c18b66aa9a19af7086b9a080694"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:20:36 crc kubenswrapper[4682]: I1210 11:20:36.480777 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://09157c81d5b2d322b1ec981283a8a88601f69c18b66aa9a19af7086b9a080694" gracePeriod=600 Dec 10 11:20:36 crc kubenswrapper[4682]: I1210 11:20:36.860207 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="09157c81d5b2d322b1ec981283a8a88601f69c18b66aa9a19af7086b9a080694" exitCode=0 Dec 10 11:20:36 crc kubenswrapper[4682]: I1210 11:20:36.860255 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"09157c81d5b2d322b1ec981283a8a88601f69c18b66aa9a19af7086b9a080694"} Dec 10 11:20:36 crc kubenswrapper[4682]: I1210 11:20:36.860544 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0"} Dec 10 11:20:36 crc kubenswrapper[4682]: I1210 11:20:36.860565 4682 scope.go:117] "RemoveContainer" containerID="975bf5410103b06e9260759d7bfa0ac1e50cb498f62cd00676cd4ad7d630412f" Dec 10 11:20:48 crc kubenswrapper[4682]: E1210 11:20:48.383994 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:20:49 crc kubenswrapper[4682]: E1210 11:20:49.383312 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:21:01 crc kubenswrapper[4682]: E1210 11:21:01.383395 4682 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:21:02 crc kubenswrapper[4682]: E1210 11:21:02.384286 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.625650 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sjhpt"] Dec 10 11:21:06 crc kubenswrapper[4682]: E1210 11:21:06.626595 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aaa6112-1a85-48d7-9551-c7b8d02d06db" containerName="extract-utilities" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.626610 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aaa6112-1a85-48d7-9551-c7b8d02d06db" containerName="extract-utilities" Dec 10 11:21:06 crc kubenswrapper[4682]: E1210 11:21:06.626644 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aaa6112-1a85-48d7-9551-c7b8d02d06db" containerName="registry-server" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.626652 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aaa6112-1a85-48d7-9551-c7b8d02d06db" containerName="registry-server" Dec 10 11:21:06 crc kubenswrapper[4682]: E1210 11:21:06.626672 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aaa6112-1a85-48d7-9551-c7b8d02d06db" containerName="extract-content" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.626680 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aaa6112-1a85-48d7-9551-c7b8d02d06db" containerName="extract-content" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.626947 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="6aaa6112-1a85-48d7-9551-c7b8d02d06db" containerName="registry-server" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.632700 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.644730 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjhpt"] Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.673811 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/805c5df4-4664-490c-87c1-273323409d29-catalog-content\") pod \"redhat-marketplace-sjhpt\" (UID: \"805c5df4-4664-490c-87c1-273323409d29\") " pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.673867 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/805c5df4-4664-490c-87c1-273323409d29-utilities\") pod \"redhat-marketplace-sjhpt\" (UID: \"805c5df4-4664-490c-87c1-273323409d29\") " pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.674008 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6spfv\" (UniqueName: \"kubernetes.io/projected/805c5df4-4664-490c-87c1-273323409d29-kube-api-access-6spfv\") pod \"redhat-marketplace-sjhpt\" (UID: \"805c5df4-4664-490c-87c1-273323409d29\") " pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.775969 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/805c5df4-4664-490c-87c1-273323409d29-catalog-content\") pod \"redhat-marketplace-sjhpt\" (UID: \"805c5df4-4664-490c-87c1-273323409d29\") " pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.776012 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/805c5df4-4664-490c-87c1-273323409d29-utilities\") pod \"redhat-marketplace-sjhpt\" (UID: \"805c5df4-4664-490c-87c1-273323409d29\") " pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.776110 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6spfv\" (UniqueName: \"kubernetes.io/projected/805c5df4-4664-490c-87c1-273323409d29-kube-api-access-6spfv\") pod \"redhat-marketplace-sjhpt\" (UID: \"805c5df4-4664-490c-87c1-273323409d29\") " pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.776526 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/805c5df4-4664-490c-87c1-273323409d29-catalog-content\") pod \"redhat-marketplace-sjhpt\" (UID: \"805c5df4-4664-490c-87c1-273323409d29\") " pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.776577 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/805c5df4-4664-490c-87c1-273323409d29-utilities\") pod \"redhat-marketplace-sjhpt\" (UID: \"805c5df4-4664-490c-87c1-273323409d29\") " pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.798065 4682 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-6spfv\" (UniqueName: \"kubernetes.io/projected/805c5df4-4664-490c-87c1-273323409d29-kube-api-access-6spfv\") pod \"redhat-marketplace-sjhpt\" (UID: \"805c5df4-4664-490c-87c1-273323409d29\") " pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:06 crc kubenswrapper[4682]: I1210 11:21:06.955362 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:07 crc kubenswrapper[4682]: I1210 11:21:07.444822 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjhpt"] Dec 10 11:21:08 crc kubenswrapper[4682]: I1210 11:21:08.172281 4682 generic.go:334] "Generic (PLEG): container finished" podID="805c5df4-4664-490c-87c1-273323409d29" containerID="08c84ca02bff03575550456d9d41535dfbd5b7be75a9f63eb03b629be4c28477" exitCode=0 Dec 10 11:21:08 crc kubenswrapper[4682]: I1210 11:21:08.172384 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjhpt" event={"ID":"805c5df4-4664-490c-87c1-273323409d29","Type":"ContainerDied","Data":"08c84ca02bff03575550456d9d41535dfbd5b7be75a9f63eb03b629be4c28477"} Dec 10 11:21:08 crc kubenswrapper[4682]: I1210 11:21:08.172890 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjhpt" event={"ID":"805c5df4-4664-490c-87c1-273323409d29","Type":"ContainerStarted","Data":"3dbc4921ebbf5129005e6751d342d0624006e658d6e87f6fcd8b6e6ca2b72c25"} Dec 10 11:21:09 crc kubenswrapper[4682]: I1210 11:21:09.185130 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjhpt" event={"ID":"805c5df4-4664-490c-87c1-273323409d29","Type":"ContainerStarted","Data":"7e8953fa989f64be876715f46e23a0e843b70b738f2637b289c162df26849aad"} Dec 10 11:21:10 crc kubenswrapper[4682]: I1210 11:21:10.201784 4682 generic.go:334] "Generic (PLEG): container finished" podID="805c5df4-4664-490c-87c1-273323409d29" containerID="7e8953fa989f64be876715f46e23a0e843b70b738f2637b289c162df26849aad" exitCode=0 Dec 10 11:21:10 crc kubenswrapper[4682]: I1210 11:21:10.201855 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjhpt" event={"ID":"805c5df4-4664-490c-87c1-273323409d29","Type":"ContainerDied","Data":"7e8953fa989f64be876715f46e23a0e843b70b738f2637b289c162df26849aad"} Dec 10 11:21:11 crc kubenswrapper[4682]: I1210 11:21:11.213668 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjhpt" event={"ID":"805c5df4-4664-490c-87c1-273323409d29","Type":"ContainerStarted","Data":"85a78c5eeb777462e96195b901a022592b7b99e9ed92922afe8766285eb950f2"} Dec 10 11:21:11 crc kubenswrapper[4682]: I1210 11:21:11.239147 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sjhpt" podStartSLOduration=2.8107314949999997 podStartE2EDuration="5.239127646s" podCreationTimestamp="2025-12-10 11:21:06 +0000 UTC" firstStartedPulling="2025-12-10 11:21:08.176293667 +0000 UTC m=+2148.496504417" lastFinishedPulling="2025-12-10 11:21:10.604689808 +0000 UTC m=+2150.924900568" observedRunningTime="2025-12-10 11:21:11.235152201 +0000 UTC m=+2151.555362971" watchObservedRunningTime="2025-12-10 11:21:11.239127646 +0000 UTC m=+2151.559338396" Dec 10 11:21:13 crc kubenswrapper[4682]: E1210 11:21:13.383851 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:21:16 crc kubenswrapper[4682]: I1210 11:21:16.955702 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:16 crc kubenswrapper[4682]: I1210 11:21:16.956713 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:17 crc kubenswrapper[4682]: I1210 11:21:17.001236 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:17 crc kubenswrapper[4682]: I1210 11:21:17.337283 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:17 crc kubenswrapper[4682]: I1210 11:21:17.379348 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjhpt"] Dec 10 11:21:17 crc kubenswrapper[4682]: E1210 11:21:17.383363 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:21:19 crc kubenswrapper[4682]: I1210 11:21:19.297001 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-sjhpt" podUID="805c5df4-4664-490c-87c1-273323409d29" containerName="registry-server" containerID="cri-o://85a78c5eeb777462e96195b901a022592b7b99e9ed92922afe8766285eb950f2" gracePeriod=2 Dec 10 11:21:19 crc kubenswrapper[4682]: I1210 11:21:19.882184 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.053786 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/805c5df4-4664-490c-87c1-273323409d29-utilities\") pod \"805c5df4-4664-490c-87c1-273323409d29\" (UID: \"805c5df4-4664-490c-87c1-273323409d29\") " Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.053875 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/805c5df4-4664-490c-87c1-273323409d29-catalog-content\") pod \"805c5df4-4664-490c-87c1-273323409d29\" (UID: \"805c5df4-4664-490c-87c1-273323409d29\") " Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.054150 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6spfv\" (UniqueName: \"kubernetes.io/projected/805c5df4-4664-490c-87c1-273323409d29-kube-api-access-6spfv\") pod \"805c5df4-4664-490c-87c1-273323409d29\" (UID: \"805c5df4-4664-490c-87c1-273323409d29\") " Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.054715 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/805c5df4-4664-490c-87c1-273323409d29-utilities" (OuterVolumeSpecName: "utilities") pod "805c5df4-4664-490c-87c1-273323409d29" (UID: "805c5df4-4664-490c-87c1-273323409d29"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.061330 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/805c5df4-4664-490c-87c1-273323409d29-kube-api-access-6spfv" (OuterVolumeSpecName: "kube-api-access-6spfv") pod "805c5df4-4664-490c-87c1-273323409d29" (UID: "805c5df4-4664-490c-87c1-273323409d29"). InnerVolumeSpecName "kube-api-access-6spfv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.073689 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/805c5df4-4664-490c-87c1-273323409d29-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "805c5df4-4664-490c-87c1-273323409d29" (UID: "805c5df4-4664-490c-87c1-273323409d29"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.157681 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6spfv\" (UniqueName: \"kubernetes.io/projected/805c5df4-4664-490c-87c1-273323409d29-kube-api-access-6spfv\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.158009 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/805c5df4-4664-490c-87c1-273323409d29-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.158087 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/805c5df4-4664-490c-87c1-273323409d29-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.310686 4682 generic.go:334] "Generic (PLEG): container finished" podID="805c5df4-4664-490c-87c1-273323409d29" containerID="85a78c5eeb777462e96195b901a022592b7b99e9ed92922afe8766285eb950f2" exitCode=0 Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.310734 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjhpt" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.310768 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjhpt" event={"ID":"805c5df4-4664-490c-87c1-273323409d29","Type":"ContainerDied","Data":"85a78c5eeb777462e96195b901a022592b7b99e9ed92922afe8766285eb950f2"} Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.310841 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjhpt" event={"ID":"805c5df4-4664-490c-87c1-273323409d29","Type":"ContainerDied","Data":"3dbc4921ebbf5129005e6751d342d0624006e658d6e87f6fcd8b6e6ca2b72c25"} Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.310870 4682 scope.go:117] "RemoveContainer" containerID="85a78c5eeb777462e96195b901a022592b7b99e9ed92922afe8766285eb950f2" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.351550 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjhpt"] Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.355968 4682 scope.go:117] "RemoveContainer" containerID="7e8953fa989f64be876715f46e23a0e843b70b738f2637b289c162df26849aad" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.360432 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjhpt"] Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.383666 4682 scope.go:117] "RemoveContainer" containerID="08c84ca02bff03575550456d9d41535dfbd5b7be75a9f63eb03b629be4c28477" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.393660 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="805c5df4-4664-490c-87c1-273323409d29" path="/var/lib/kubelet/pods/805c5df4-4664-490c-87c1-273323409d29/volumes" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.439616 4682 scope.go:117] "RemoveContainer" containerID="85a78c5eeb777462e96195b901a022592b7b99e9ed92922afe8766285eb950f2" Dec 10 11:21:20 crc kubenswrapper[4682]: E1210 11:21:20.440071 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85a78c5eeb777462e96195b901a022592b7b99e9ed92922afe8766285eb950f2\": container with ID 
starting with 85a78c5eeb777462e96195b901a022592b7b99e9ed92922afe8766285eb950f2 not found: ID does not exist" containerID="85a78c5eeb777462e96195b901a022592b7b99e9ed92922afe8766285eb950f2" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.440117 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85a78c5eeb777462e96195b901a022592b7b99e9ed92922afe8766285eb950f2"} err="failed to get container status \"85a78c5eeb777462e96195b901a022592b7b99e9ed92922afe8766285eb950f2\": rpc error: code = NotFound desc = could not find container \"85a78c5eeb777462e96195b901a022592b7b99e9ed92922afe8766285eb950f2\": container with ID starting with 85a78c5eeb777462e96195b901a022592b7b99e9ed92922afe8766285eb950f2 not found: ID does not exist" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.440159 4682 scope.go:117] "RemoveContainer" containerID="7e8953fa989f64be876715f46e23a0e843b70b738f2637b289c162df26849aad" Dec 10 11:21:20 crc kubenswrapper[4682]: E1210 11:21:20.440526 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e8953fa989f64be876715f46e23a0e843b70b738f2637b289c162df26849aad\": container with ID starting with 7e8953fa989f64be876715f46e23a0e843b70b738f2637b289c162df26849aad not found: ID does not exist" containerID="7e8953fa989f64be876715f46e23a0e843b70b738f2637b289c162df26849aad" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.440569 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e8953fa989f64be876715f46e23a0e843b70b738f2637b289c162df26849aad"} err="failed to get container status \"7e8953fa989f64be876715f46e23a0e843b70b738f2637b289c162df26849aad\": rpc error: code = NotFound desc = could not find container \"7e8953fa989f64be876715f46e23a0e843b70b738f2637b289c162df26849aad\": container with ID starting with 7e8953fa989f64be876715f46e23a0e843b70b738f2637b289c162df26849aad not found: ID does not exist" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.440598 4682 scope.go:117] "RemoveContainer" containerID="08c84ca02bff03575550456d9d41535dfbd5b7be75a9f63eb03b629be4c28477" Dec 10 11:21:20 crc kubenswrapper[4682]: E1210 11:21:20.440971 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08c84ca02bff03575550456d9d41535dfbd5b7be75a9f63eb03b629be4c28477\": container with ID starting with 08c84ca02bff03575550456d9d41535dfbd5b7be75a9f63eb03b629be4c28477 not found: ID does not exist" containerID="08c84ca02bff03575550456d9d41535dfbd5b7be75a9f63eb03b629be4c28477" Dec 10 11:21:20 crc kubenswrapper[4682]: I1210 11:21:20.440998 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08c84ca02bff03575550456d9d41535dfbd5b7be75a9f63eb03b629be4c28477"} err="failed to get container status \"08c84ca02bff03575550456d9d41535dfbd5b7be75a9f63eb03b629be4c28477\": rpc error: code = NotFound desc = could not find container \"08c84ca02bff03575550456d9d41535dfbd5b7be75a9f63eb03b629be4c28477\": container with ID starting with 08c84ca02bff03575550456d9d41535dfbd5b7be75a9f63eb03b629be4c28477 not found: ID does not exist" Dec 10 11:21:24 crc kubenswrapper[4682]: E1210 11:21:24.384387 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:21:30 crc kubenswrapper[4682]: E1210 11:21:30.433808 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:21:34 crc kubenswrapper[4682]: I1210 11:21:34.365274 4682 scope.go:117] "RemoveContainer" containerID="ff3e928170eb52aa6c63146bd59612247767f89d8057247a6b538918b7f6e802" Dec 10 11:21:35 crc kubenswrapper[4682]: E1210 11:21:35.382896 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:21:43 crc kubenswrapper[4682]: E1210 11:21:43.397754 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:21:46 crc kubenswrapper[4682]: E1210 11:21:46.383298 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:21:54 crc kubenswrapper[4682]: E1210 11:21:54.521124 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:21:54 crc kubenswrapper[4682]: E1210 11:21:54.521950 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:21:54 crc kubenswrapper[4682]: E1210 11:21:54.522190 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:21:54 crc kubenswrapper[4682]: E1210 11:21:54.523512 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:21:58 crc kubenswrapper[4682]: E1210 11:21:58.461841 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:21:58 crc kubenswrapper[4682]: E1210 11:21:58.462913 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:21:58 crc kubenswrapper[4682]: E1210 11:21:58.463176 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:21:58 crc kubenswrapper[4682]: E1210 11:21:58.464416 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:22:05 crc kubenswrapper[4682]: E1210 11:22:05.397771 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:22:11 crc kubenswrapper[4682]: E1210 11:22:11.383931 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:22:17 crc kubenswrapper[4682]: E1210 11:22:17.383289 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:22:22 crc kubenswrapper[4682]: E1210 11:22:22.384769 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:22:29 crc kubenswrapper[4682]: E1210 11:22:29.383423 4682 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:22:34 crc kubenswrapper[4682]: E1210 11:22:34.383551 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:22:36 crc kubenswrapper[4682]: I1210 11:22:36.478951 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:22:36 crc kubenswrapper[4682]: I1210 11:22:36.479286 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:22:40 crc kubenswrapper[4682]: E1210 11:22:40.384986 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:22:45 crc kubenswrapper[4682]: E1210 11:22:45.383610 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.454123 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-254jm"] Dec 10 11:22:49 crc kubenswrapper[4682]: E1210 11:22:49.455096 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="805c5df4-4664-490c-87c1-273323409d29" containerName="registry-server" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.455111 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="805c5df4-4664-490c-87c1-273323409d29" containerName="registry-server" Dec 10 11:22:49 crc kubenswrapper[4682]: E1210 11:22:49.455144 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="805c5df4-4664-490c-87c1-273323409d29" containerName="extract-content" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.455153 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="805c5df4-4664-490c-87c1-273323409d29" containerName="extract-content" Dec 10 11:22:49 crc kubenswrapper[4682]: E1210 11:22:49.455178 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="805c5df4-4664-490c-87c1-273323409d29" containerName="extract-utilities" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.455186 
4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="805c5df4-4664-490c-87c1-273323409d29" containerName="extract-utilities" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.455470 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="805c5df4-4664-490c-87c1-273323409d29" containerName="registry-server" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.457344 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.466247 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-254jm"] Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.548833 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/509f02c7-e075-4496-9144-282265d68514-catalog-content\") pod \"certified-operators-254jm\" (UID: \"509f02c7-e075-4496-9144-282265d68514\") " pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.548969 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmpms\" (UniqueName: \"kubernetes.io/projected/509f02c7-e075-4496-9144-282265d68514-kube-api-access-kmpms\") pod \"certified-operators-254jm\" (UID: \"509f02c7-e075-4496-9144-282265d68514\") " pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.549018 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/509f02c7-e075-4496-9144-282265d68514-utilities\") pod \"certified-operators-254jm\" (UID: \"509f02c7-e075-4496-9144-282265d68514\") " pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.651316 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/509f02c7-e075-4496-9144-282265d68514-catalog-content\") pod \"certified-operators-254jm\" (UID: \"509f02c7-e075-4496-9144-282265d68514\") " pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.651418 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmpms\" (UniqueName: \"kubernetes.io/projected/509f02c7-e075-4496-9144-282265d68514-kube-api-access-kmpms\") pod \"certified-operators-254jm\" (UID: \"509f02c7-e075-4496-9144-282265d68514\") " pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.651451 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/509f02c7-e075-4496-9144-282265d68514-utilities\") pod \"certified-operators-254jm\" (UID: \"509f02c7-e075-4496-9144-282265d68514\") " pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.651948 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/509f02c7-e075-4496-9144-282265d68514-utilities\") pod \"certified-operators-254jm\" (UID: \"509f02c7-e075-4496-9144-282265d68514\") " pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:22:49 crc 
kubenswrapper[4682]: I1210 11:22:49.652035 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/509f02c7-e075-4496-9144-282265d68514-catalog-content\") pod \"certified-operators-254jm\" (UID: \"509f02c7-e075-4496-9144-282265d68514\") " pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.674415 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmpms\" (UniqueName: \"kubernetes.io/projected/509f02c7-e075-4496-9144-282265d68514-kube-api-access-kmpms\") pod \"certified-operators-254jm\" (UID: \"509f02c7-e075-4496-9144-282265d68514\") " pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:22:49 crc kubenswrapper[4682]: I1210 11:22:49.783099 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:22:50 crc kubenswrapper[4682]: I1210 11:22:50.393939 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-254jm"] Dec 10 11:22:51 crc kubenswrapper[4682]: I1210 11:22:51.299586 4682 generic.go:334] "Generic (PLEG): container finished" podID="509f02c7-e075-4496-9144-282265d68514" containerID="dabd64350f0b19f3aa091c7baa3f983074a1ba91d3b0f062d44f10403311d42a" exitCode=0 Dec 10 11:22:51 crc kubenswrapper[4682]: I1210 11:22:51.299635 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-254jm" event={"ID":"509f02c7-e075-4496-9144-282265d68514","Type":"ContainerDied","Data":"dabd64350f0b19f3aa091c7baa3f983074a1ba91d3b0f062d44f10403311d42a"} Dec 10 11:22:51 crc kubenswrapper[4682]: I1210 11:22:51.299664 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-254jm" event={"ID":"509f02c7-e075-4496-9144-282265d68514","Type":"ContainerStarted","Data":"aefcebcb2a59f28470f11ca973c5ab7ee4eb3cfc9eefa8c3a250bb3d67a8c824"} Dec 10 11:22:52 crc kubenswrapper[4682]: I1210 11:22:52.316591 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-254jm" event={"ID":"509f02c7-e075-4496-9144-282265d68514","Type":"ContainerStarted","Data":"1424825903d3ffb1f9452629df25d106385088da1d1a2fe5148fffcf3b9a6168"} Dec 10 11:22:52 crc kubenswrapper[4682]: E1210 11:22:52.383684 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:22:53 crc kubenswrapper[4682]: I1210 11:22:53.326902 4682 generic.go:334] "Generic (PLEG): container finished" podID="509f02c7-e075-4496-9144-282265d68514" containerID="1424825903d3ffb1f9452629df25d106385088da1d1a2fe5148fffcf3b9a6168" exitCode=0 Dec 10 11:22:53 crc kubenswrapper[4682]: I1210 11:22:53.326941 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-254jm" event={"ID":"509f02c7-e075-4496-9144-282265d68514","Type":"ContainerDied","Data":"1424825903d3ffb1f9452629df25d106385088da1d1a2fe5148fffcf3b9a6168"} Dec 10 11:22:55 crc kubenswrapper[4682]: I1210 11:22:55.347151 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-254jm" 
event={"ID":"509f02c7-e075-4496-9144-282265d68514","Type":"ContainerStarted","Data":"aa4df9d3c8b5bbcb337d9a9f8698e8e44df58012c225a6749c8ee36f4d1f22e5"} Dec 10 11:22:55 crc kubenswrapper[4682]: I1210 11:22:55.351802 4682 generic.go:334] "Generic (PLEG): container finished" podID="df88b6db-13a9-4d76-a9da-e259ef1f79a2" containerID="618c4ef5c6e030299c8a389db8c234467e4516465f7514e3183d44b4d3695ae0" exitCode=2 Dec 10 11:22:55 crc kubenswrapper[4682]: I1210 11:22:55.351851 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" event={"ID":"df88b6db-13a9-4d76-a9da-e259ef1f79a2","Type":"ContainerDied","Data":"618c4ef5c6e030299c8a389db8c234467e4516465f7514e3183d44b4d3695ae0"} Dec 10 11:22:55 crc kubenswrapper[4682]: I1210 11:22:55.374601 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-254jm" podStartSLOduration=3.524784952 podStartE2EDuration="6.374572842s" podCreationTimestamp="2025-12-10 11:22:49 +0000 UTC" firstStartedPulling="2025-12-10 11:22:51.302658477 +0000 UTC m=+2251.622869237" lastFinishedPulling="2025-12-10 11:22:54.152446367 +0000 UTC m=+2254.472657127" observedRunningTime="2025-12-10 11:22:55.366852968 +0000 UTC m=+2255.687063728" watchObservedRunningTime="2025-12-10 11:22:55.374572842 +0000 UTC m=+2255.694783592" Dec 10 11:22:56 crc kubenswrapper[4682]: I1210 11:22:56.832597 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" Dec 10 11:22:57 crc kubenswrapper[4682]: I1210 11:22:57.008895 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/df88b6db-13a9-4d76-a9da-e259ef1f79a2-inventory\") pod \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\" (UID: \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\") " Dec 10 11:22:57 crc kubenswrapper[4682]: I1210 11:22:57.008971 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/df88b6db-13a9-4d76-a9da-e259ef1f79a2-ssh-key\") pod \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\" (UID: \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\") " Dec 10 11:22:57 crc kubenswrapper[4682]: I1210 11:22:57.009117 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fspbc\" (UniqueName: \"kubernetes.io/projected/df88b6db-13a9-4d76-a9da-e259ef1f79a2-kube-api-access-fspbc\") pod \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\" (UID: \"df88b6db-13a9-4d76-a9da-e259ef1f79a2\") " Dec 10 11:22:57 crc kubenswrapper[4682]: I1210 11:22:57.014118 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df88b6db-13a9-4d76-a9da-e259ef1f79a2-kube-api-access-fspbc" (OuterVolumeSpecName: "kube-api-access-fspbc") pod "df88b6db-13a9-4d76-a9da-e259ef1f79a2" (UID: "df88b6db-13a9-4d76-a9da-e259ef1f79a2"). InnerVolumeSpecName "kube-api-access-fspbc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:22:57 crc kubenswrapper[4682]: I1210 11:22:57.039617 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df88b6db-13a9-4d76-a9da-e259ef1f79a2-inventory" (OuterVolumeSpecName: "inventory") pod "df88b6db-13a9-4d76-a9da-e259ef1f79a2" (UID: "df88b6db-13a9-4d76-a9da-e259ef1f79a2"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:22:57 crc kubenswrapper[4682]: I1210 11:22:57.041391 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df88b6db-13a9-4d76-a9da-e259ef1f79a2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "df88b6db-13a9-4d76-a9da-e259ef1f79a2" (UID: "df88b6db-13a9-4d76-a9da-e259ef1f79a2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:22:57 crc kubenswrapper[4682]: I1210 11:22:57.111112 4682 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/df88b6db-13a9-4d76-a9da-e259ef1f79a2-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:22:57 crc kubenswrapper[4682]: I1210 11:22:57.111154 4682 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/df88b6db-13a9-4d76-a9da-e259ef1f79a2-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:22:57 crc kubenswrapper[4682]: I1210 11:22:57.111166 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fspbc\" (UniqueName: \"kubernetes.io/projected/df88b6db-13a9-4d76-a9da-e259ef1f79a2-kube-api-access-fspbc\") on node \"crc\" DevicePath \"\"" Dec 10 11:22:57 crc kubenswrapper[4682]: I1210 11:22:57.376284 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" event={"ID":"df88b6db-13a9-4d76-a9da-e259ef1f79a2","Type":"ContainerDied","Data":"3866b4ead541a51109967f12e3252cc6dd31959e535de052e6957249df59d190"} Dec 10 11:22:57 crc kubenswrapper[4682]: I1210 11:22:57.376638 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3866b4ead541a51109967f12e3252cc6dd31959e535de052e6957249df59d190" Dec 10 11:22:57 crc kubenswrapper[4682]: I1210 11:22:57.376360 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-49h6v" Dec 10 11:22:59 crc kubenswrapper[4682]: I1210 11:22:59.783690 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:22:59 crc kubenswrapper[4682]: I1210 11:22:59.783987 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:22:59 crc kubenswrapper[4682]: I1210 11:22:59.830762 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:23:00 crc kubenswrapper[4682]: E1210 11:23:00.390264 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:23:00 crc kubenswrapper[4682]: I1210 11:23:00.448423 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:23:00 crc kubenswrapper[4682]: I1210 11:23:00.496735 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-254jm"] Dec 10 11:23:02 crc kubenswrapper[4682]: I1210 11:23:02.424749 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-254jm" podUID="509f02c7-e075-4496-9144-282265d68514" containerName="registry-server" containerID="cri-o://aa4df9d3c8b5bbcb337d9a9f8698e8e44df58012c225a6749c8ee36f4d1f22e5" gracePeriod=2 Dec 10 11:23:02 crc kubenswrapper[4682]: I1210 11:23:02.946012 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.037757 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kmpms\" (UniqueName: \"kubernetes.io/projected/509f02c7-e075-4496-9144-282265d68514-kube-api-access-kmpms\") pod \"509f02c7-e075-4496-9144-282265d68514\" (UID: \"509f02c7-e075-4496-9144-282265d68514\") " Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.037862 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/509f02c7-e075-4496-9144-282265d68514-utilities\") pod \"509f02c7-e075-4496-9144-282265d68514\" (UID: \"509f02c7-e075-4496-9144-282265d68514\") " Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.038012 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/509f02c7-e075-4496-9144-282265d68514-catalog-content\") pod \"509f02c7-e075-4496-9144-282265d68514\" (UID: \"509f02c7-e075-4496-9144-282265d68514\") " Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.038808 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/509f02c7-e075-4496-9144-282265d68514-utilities" (OuterVolumeSpecName: "utilities") pod "509f02c7-e075-4496-9144-282265d68514" (UID: "509f02c7-e075-4496-9144-282265d68514"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.043788 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/509f02c7-e075-4496-9144-282265d68514-kube-api-access-kmpms" (OuterVolumeSpecName: "kube-api-access-kmpms") pod "509f02c7-e075-4496-9144-282265d68514" (UID: "509f02c7-e075-4496-9144-282265d68514"). InnerVolumeSpecName "kube-api-access-kmpms". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.088765 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/509f02c7-e075-4496-9144-282265d68514-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "509f02c7-e075-4496-9144-282265d68514" (UID: "509f02c7-e075-4496-9144-282265d68514"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.141139 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kmpms\" (UniqueName: \"kubernetes.io/projected/509f02c7-e075-4496-9144-282265d68514-kube-api-access-kmpms\") on node \"crc\" DevicePath \"\"" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.141172 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/509f02c7-e075-4496-9144-282265d68514-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.141184 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/509f02c7-e075-4496-9144-282265d68514-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.436849 4682 generic.go:334] "Generic (PLEG): container finished" podID="509f02c7-e075-4496-9144-282265d68514" containerID="aa4df9d3c8b5bbcb337d9a9f8698e8e44df58012c225a6749c8ee36f4d1f22e5" exitCode=0 Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.436964 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-254jm" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.437016 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-254jm" event={"ID":"509f02c7-e075-4496-9144-282265d68514","Type":"ContainerDied","Data":"aa4df9d3c8b5bbcb337d9a9f8698e8e44df58012c225a6749c8ee36f4d1f22e5"} Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.437843 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-254jm" event={"ID":"509f02c7-e075-4496-9144-282265d68514","Type":"ContainerDied","Data":"aefcebcb2a59f28470f11ca973c5ab7ee4eb3cfc9eefa8c3a250bb3d67a8c824"} Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.437876 4682 scope.go:117] "RemoveContainer" containerID="aa4df9d3c8b5bbcb337d9a9f8698e8e44df58012c225a6749c8ee36f4d1f22e5" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.459756 4682 scope.go:117] "RemoveContainer" containerID="1424825903d3ffb1f9452629df25d106385088da1d1a2fe5148fffcf3b9a6168" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.488543 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-254jm"] Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.500861 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-254jm"] Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.503342 4682 scope.go:117] "RemoveContainer" containerID="dabd64350f0b19f3aa091c7baa3f983074a1ba91d3b0f062d44f10403311d42a" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.549547 4682 scope.go:117] "RemoveContainer" containerID="aa4df9d3c8b5bbcb337d9a9f8698e8e44df58012c225a6749c8ee36f4d1f22e5" Dec 10 11:23:03 crc kubenswrapper[4682]: E1210 11:23:03.549967 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa4df9d3c8b5bbcb337d9a9f8698e8e44df58012c225a6749c8ee36f4d1f22e5\": container with ID starting with aa4df9d3c8b5bbcb337d9a9f8698e8e44df58012c225a6749c8ee36f4d1f22e5 not found: ID does not exist" containerID="aa4df9d3c8b5bbcb337d9a9f8698e8e44df58012c225a6749c8ee36f4d1f22e5" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.550012 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa4df9d3c8b5bbcb337d9a9f8698e8e44df58012c225a6749c8ee36f4d1f22e5"} err="failed to get container status \"aa4df9d3c8b5bbcb337d9a9f8698e8e44df58012c225a6749c8ee36f4d1f22e5\": rpc error: code = NotFound desc = could not find container \"aa4df9d3c8b5bbcb337d9a9f8698e8e44df58012c225a6749c8ee36f4d1f22e5\": container with ID starting with aa4df9d3c8b5bbcb337d9a9f8698e8e44df58012c225a6749c8ee36f4d1f22e5 not found: ID does not exist" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.550039 4682 scope.go:117] "RemoveContainer" containerID="1424825903d3ffb1f9452629df25d106385088da1d1a2fe5148fffcf3b9a6168" Dec 10 11:23:03 crc kubenswrapper[4682]: E1210 11:23:03.550368 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1424825903d3ffb1f9452629df25d106385088da1d1a2fe5148fffcf3b9a6168\": container with ID starting with 1424825903d3ffb1f9452629df25d106385088da1d1a2fe5148fffcf3b9a6168 not found: ID does not exist" containerID="1424825903d3ffb1f9452629df25d106385088da1d1a2fe5148fffcf3b9a6168" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.550516 4682 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1424825903d3ffb1f9452629df25d106385088da1d1a2fe5148fffcf3b9a6168"} err="failed to get container status \"1424825903d3ffb1f9452629df25d106385088da1d1a2fe5148fffcf3b9a6168\": rpc error: code = NotFound desc = could not find container \"1424825903d3ffb1f9452629df25d106385088da1d1a2fe5148fffcf3b9a6168\": container with ID starting with 1424825903d3ffb1f9452629df25d106385088da1d1a2fe5148fffcf3b9a6168 not found: ID does not exist" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.550632 4682 scope.go:117] "RemoveContainer" containerID="dabd64350f0b19f3aa091c7baa3f983074a1ba91d3b0f062d44f10403311d42a" Dec 10 11:23:03 crc kubenswrapper[4682]: E1210 11:23:03.551072 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dabd64350f0b19f3aa091c7baa3f983074a1ba91d3b0f062d44f10403311d42a\": container with ID starting with dabd64350f0b19f3aa091c7baa3f983074a1ba91d3b0f062d44f10403311d42a not found: ID does not exist" containerID="dabd64350f0b19f3aa091c7baa3f983074a1ba91d3b0f062d44f10403311d42a" Dec 10 11:23:03 crc kubenswrapper[4682]: I1210 11:23:03.551105 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dabd64350f0b19f3aa091c7baa3f983074a1ba91d3b0f062d44f10403311d42a"} err="failed to get container status \"dabd64350f0b19f3aa091c7baa3f983074a1ba91d3b0f062d44f10403311d42a\": rpc error: code = NotFound desc = could not find container \"dabd64350f0b19f3aa091c7baa3f983074a1ba91d3b0f062d44f10403311d42a\": container with ID starting with dabd64350f0b19f3aa091c7baa3f983074a1ba91d3b0f062d44f10403311d42a not found: ID does not exist" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.046533 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm"] Dec 10 11:23:04 crc kubenswrapper[4682]: E1210 11:23:04.047427 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="509f02c7-e075-4496-9144-282265d68514" containerName="registry-server" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.047454 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="509f02c7-e075-4496-9144-282265d68514" containerName="registry-server" Dec 10 11:23:04 crc kubenswrapper[4682]: E1210 11:23:04.047503 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df88b6db-13a9-4d76-a9da-e259ef1f79a2" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.047515 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="df88b6db-13a9-4d76-a9da-e259ef1f79a2" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:23:04 crc kubenswrapper[4682]: E1210 11:23:04.047533 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="509f02c7-e075-4496-9144-282265d68514" containerName="extract-content" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.047541 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="509f02c7-e075-4496-9144-282265d68514" containerName="extract-content" Dec 10 11:23:04 crc kubenswrapper[4682]: E1210 11:23:04.047562 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="509f02c7-e075-4496-9144-282265d68514" containerName="extract-utilities" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.047570 4682 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="509f02c7-e075-4496-9144-282265d68514" containerName="extract-utilities" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.047813 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="df88b6db-13a9-4d76-a9da-e259ef1f79a2" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.047852 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="509f02c7-e075-4496-9144-282265d68514" containerName="registry-server" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.048913 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.051573 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.051615 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.052382 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.052662 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-tln2g" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.070846 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm"] Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.163051 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm\" (UID: \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.163099 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scntr\" (UniqueName: \"kubernetes.io/projected/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-kube-api-access-scntr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm\" (UID: \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.163136 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm\" (UID: \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.264918 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm\" (UID: \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.265119 4682 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm\" (UID: \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.265142 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scntr\" (UniqueName: \"kubernetes.io/projected/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-kube-api-access-scntr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm\" (UID: \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.273545 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm\" (UID: \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.274080 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm\" (UID: \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.280169 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scntr\" (UniqueName: \"kubernetes.io/projected/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-kube-api-access-scntr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm\" (UID: \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.380017 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.396250 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="509f02c7-e075-4496-9144-282265d68514" path="/var/lib/kubelet/pods/509f02c7-e075-4496-9144-282265d68514/volumes" Dec 10 11:23:04 crc kubenswrapper[4682]: I1210 11:23:04.744448 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm"] Dec 10 11:23:04 crc kubenswrapper[4682]: W1210 11:23:04.752895 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09844e48_f7bc_4c51_9dfa_dcc6daafb27f.slice/crio-fde50bc0ca758d7adda649ff7a1042083734e6bd6056e26780e7636534083b61 WatchSource:0}: Error finding container fde50bc0ca758d7adda649ff7a1042083734e6bd6056e26780e7636534083b61: Status 404 returned error can't find the container with id fde50bc0ca758d7adda649ff7a1042083734e6bd6056e26780e7636534083b61 Dec 10 11:23:05 crc kubenswrapper[4682]: I1210 11:23:05.457100 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" event={"ID":"09844e48-f7bc-4c51-9dfa-dcc6daafb27f","Type":"ContainerStarted","Data":"fde50bc0ca758d7adda649ff7a1042083734e6bd6056e26780e7636534083b61"} Dec 10 11:23:06 crc kubenswrapper[4682]: I1210 11:23:06.472042 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" event={"ID":"09844e48-f7bc-4c51-9dfa-dcc6daafb27f","Type":"ContainerStarted","Data":"58d50ef1b7d6d032fe99cf893a4d48888b18f8222b35db4bdf9b8ab37f4406f5"} Dec 10 11:23:06 crc kubenswrapper[4682]: I1210 11:23:06.478433 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:23:06 crc kubenswrapper[4682]: I1210 11:23:06.478481 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:23:06 crc kubenswrapper[4682]: I1210 11:23:06.496744 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" podStartSLOduration=2.066007276 podStartE2EDuration="2.496727119s" podCreationTimestamp="2025-12-10 11:23:04 +0000 UTC" firstStartedPulling="2025-12-10 11:23:04.755372174 +0000 UTC m=+2265.075582924" lastFinishedPulling="2025-12-10 11:23:05.186092017 +0000 UTC m=+2265.506302767" observedRunningTime="2025-12-10 11:23:06.487377195 +0000 UTC m=+2266.807587975" watchObservedRunningTime="2025-12-10 11:23:06.496727119 +0000 UTC m=+2266.816937859" Dec 10 11:23:07 crc kubenswrapper[4682]: E1210 11:23:07.393769 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" 
podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:23:12 crc kubenswrapper[4682]: E1210 11:23:12.382518 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:23:20 crc kubenswrapper[4682]: E1210 11:23:20.394887 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:23:25 crc kubenswrapper[4682]: E1210 11:23:25.384970 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:23:34 crc kubenswrapper[4682]: I1210 11:23:34.533530 4682 scope.go:117] "RemoveContainer" containerID="40ebed827bad84b0cfe8c31fa5351e8cac8d950b7e604eb67e06672ce86a0c88" Dec 10 11:23:34 crc kubenswrapper[4682]: I1210 11:23:34.555076 4682 scope.go:117] "RemoveContainer" containerID="dd07fcd5506021f86627ec496dfbfd26a136fab3258c18e01dd1b11932cbccf8" Dec 10 11:23:34 crc kubenswrapper[4682]: I1210 11:23:34.612316 4682 scope.go:117] "RemoveContainer" containerID="6fbd2abf0acdcb21804b1915eeaa4d048a0f082b421afb3ea43025132ad1f73e" Dec 10 11:23:35 crc kubenswrapper[4682]: E1210 11:23:35.382107 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:23:36 crc kubenswrapper[4682]: E1210 11:23:36.383990 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:23:36 crc kubenswrapper[4682]: I1210 11:23:36.478678 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:23:36 crc kubenswrapper[4682]: I1210 11:23:36.478752 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:23:36 crc kubenswrapper[4682]: I1210 11:23:36.478801 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 11:23:36 crc kubenswrapper[4682]: I1210 11:23:36.479705 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:23:36 crc kubenswrapper[4682]: I1210 11:23:36.479780 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" gracePeriod=600 Dec 10 11:23:36 crc kubenswrapper[4682]: I1210 11:23:36.803408 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" exitCode=0 Dec 10 11:23:36 crc kubenswrapper[4682]: I1210 11:23:36.803492 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0"} Dec 10 11:23:36 crc kubenswrapper[4682]: I1210 11:23:36.803548 4682 scope.go:117] "RemoveContainer" containerID="09157c81d5b2d322b1ec981283a8a88601f69c18b66aa9a19af7086b9a080694" Dec 10 11:23:37 crc kubenswrapper[4682]: E1210 11:23:37.158253 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:23:37 crc kubenswrapper[4682]: I1210 11:23:37.817955 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:23:37 crc kubenswrapper[4682]: E1210 11:23:37.818604 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:23:48 crc kubenswrapper[4682]: E1210 11:23:48.384124 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:23:51 crc kubenswrapper[4682]: I1210 11:23:51.381328 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:23:51 crc kubenswrapper[4682]: E1210 11:23:51.381925 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:23:51 crc kubenswrapper[4682]: E1210 11:23:51.383102 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:24:00 crc kubenswrapper[4682]: E1210 11:24:00.389787 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:24:04 crc kubenswrapper[4682]: I1210 11:24:04.381926 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:24:04 crc kubenswrapper[4682]: E1210 11:24:04.383049 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:24:05 crc kubenswrapper[4682]: E1210 11:24:05.383692 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:24:14 crc kubenswrapper[4682]: E1210 11:24:14.384843 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:24:18 crc kubenswrapper[4682]: I1210 11:24:18.382578 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:24:18 crc kubenswrapper[4682]: E1210 11:24:18.384080 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:24:18 crc kubenswrapper[4682]: E1210 11:24:18.384641 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:24:26 crc kubenswrapper[4682]: E1210 11:24:26.384617 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:24:33 crc kubenswrapper[4682]: E1210 11:24:33.384039 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:24:33 crc kubenswrapper[4682]: I1210 11:24:33.384193 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:24:33 crc kubenswrapper[4682]: E1210 11:24:33.385538 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:24:39 crc kubenswrapper[4682]: E1210 11:24:39.382786 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:24:44 crc kubenswrapper[4682]: E1210 11:24:44.388382 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:24:47 crc kubenswrapper[4682]: I1210 11:24:47.381385 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:24:47 crc kubenswrapper[4682]: E1210 11:24:47.381876 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:24:52 crc kubenswrapper[4682]: E1210 11:24:52.385987 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" 
podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:24:55 crc kubenswrapper[4682]: E1210 11:24:55.382740 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:24:59 crc kubenswrapper[4682]: I1210 11:24:59.382092 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:24:59 crc kubenswrapper[4682]: E1210 11:24:59.383064 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:25:06 crc kubenswrapper[4682]: E1210 11:25:06.383181 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:25:08 crc kubenswrapper[4682]: E1210 11:25:08.387001 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:25:11 crc kubenswrapper[4682]: I1210 11:25:11.381855 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:25:11 crc kubenswrapper[4682]: E1210 11:25:11.382689 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:25:17 crc kubenswrapper[4682]: E1210 11:25:17.385792 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:25:23 crc kubenswrapper[4682]: E1210 11:25:23.383104 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:25:26 crc kubenswrapper[4682]: I1210 11:25:26.381278 4682 scope.go:117] "RemoveContainer" 
containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:25:26 crc kubenswrapper[4682]: E1210 11:25:26.382131 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:25:29 crc kubenswrapper[4682]: E1210 11:25:29.383305 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:25:34 crc kubenswrapper[4682]: E1210 11:25:34.384639 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:25:37 crc kubenswrapper[4682]: I1210 11:25:37.381934 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:25:37 crc kubenswrapper[4682]: E1210 11:25:37.382939 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:25:41 crc kubenswrapper[4682]: E1210 11:25:41.385450 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:25:49 crc kubenswrapper[4682]: E1210 11:25:49.384638 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:25:52 crc kubenswrapper[4682]: I1210 11:25:52.382275 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:25:52 crc kubenswrapper[4682]: E1210 11:25:52.383414 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" 
podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:25:52 crc kubenswrapper[4682]: E1210 11:25:52.383941 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:26:02 crc kubenswrapper[4682]: E1210 11:26:02.384182 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:26:03 crc kubenswrapper[4682]: I1210 11:26:03.381628 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:26:03 crc kubenswrapper[4682]: E1210 11:26:03.382174 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:26:06 crc kubenswrapper[4682]: E1210 11:26:06.383682 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:26:16 crc kubenswrapper[4682]: I1210 11:26:16.382081 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:26:16 crc kubenswrapper[4682]: E1210 11:26:16.383326 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:26:16 crc kubenswrapper[4682]: E1210 11:26:16.386602 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:26:18 crc kubenswrapper[4682]: E1210 11:26:18.384555 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:26:29 crc kubenswrapper[4682]: I1210 11:26:29.381980 4682 scope.go:117] "RemoveContainer" 
containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:26:29 crc kubenswrapper[4682]: E1210 11:26:29.383238 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:26:31 crc kubenswrapper[4682]: E1210 11:26:31.384912 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:26:32 crc kubenswrapper[4682]: E1210 11:26:32.384655 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:26:41 crc kubenswrapper[4682]: I1210 11:26:41.381219 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:26:41 crc kubenswrapper[4682]: E1210 11:26:41.381839 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:26:46 crc kubenswrapper[4682]: E1210 11:26:46.386276 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:26:46 crc kubenswrapper[4682]: E1210 11:26:46.386345 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:26:46 crc kubenswrapper[4682]: I1210 11:26:46.818249 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-j6bh2"] Dec 10 11:26:46 crc kubenswrapper[4682]: I1210 11:26:46.820844 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:46 crc kubenswrapper[4682]: I1210 11:26:46.842630 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j6bh2"] Dec 10 11:26:46 crc kubenswrapper[4682]: I1210 11:26:46.941665 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2v5rv\" (UniqueName: \"kubernetes.io/projected/892faef6-e365-4978-aba2-d68e2f4ec29f-kube-api-access-2v5rv\") pod \"community-operators-j6bh2\" (UID: \"892faef6-e365-4978-aba2-d68e2f4ec29f\") " pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:46 crc kubenswrapper[4682]: I1210 11:26:46.941748 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/892faef6-e365-4978-aba2-d68e2f4ec29f-catalog-content\") pod \"community-operators-j6bh2\" (UID: \"892faef6-e365-4978-aba2-d68e2f4ec29f\") " pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:46 crc kubenswrapper[4682]: I1210 11:26:46.942284 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/892faef6-e365-4978-aba2-d68e2f4ec29f-utilities\") pod \"community-operators-j6bh2\" (UID: \"892faef6-e365-4978-aba2-d68e2f4ec29f\") " pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:47 crc kubenswrapper[4682]: I1210 11:26:47.043972 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/892faef6-e365-4978-aba2-d68e2f4ec29f-catalog-content\") pod \"community-operators-j6bh2\" (UID: \"892faef6-e365-4978-aba2-d68e2f4ec29f\") " pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:47 crc kubenswrapper[4682]: I1210 11:26:47.044209 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/892faef6-e365-4978-aba2-d68e2f4ec29f-utilities\") pod \"community-operators-j6bh2\" (UID: \"892faef6-e365-4978-aba2-d68e2f4ec29f\") " pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:47 crc kubenswrapper[4682]: I1210 11:26:47.044245 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v5rv\" (UniqueName: \"kubernetes.io/projected/892faef6-e365-4978-aba2-d68e2f4ec29f-kube-api-access-2v5rv\") pod \"community-operators-j6bh2\" (UID: \"892faef6-e365-4978-aba2-d68e2f4ec29f\") " pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:47 crc kubenswrapper[4682]: I1210 11:26:47.044757 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/892faef6-e365-4978-aba2-d68e2f4ec29f-catalog-content\") pod \"community-operators-j6bh2\" (UID: \"892faef6-e365-4978-aba2-d68e2f4ec29f\") " pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:47 crc kubenswrapper[4682]: I1210 11:26:47.044788 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/892faef6-e365-4978-aba2-d68e2f4ec29f-utilities\") pod \"community-operators-j6bh2\" (UID: \"892faef6-e365-4978-aba2-d68e2f4ec29f\") " pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:47 crc kubenswrapper[4682]: I1210 11:26:47.072767 4682 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2v5rv\" (UniqueName: \"kubernetes.io/projected/892faef6-e365-4978-aba2-d68e2f4ec29f-kube-api-access-2v5rv\") pod \"community-operators-j6bh2\" (UID: \"892faef6-e365-4978-aba2-d68e2f4ec29f\") " pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:47 crc kubenswrapper[4682]: I1210 11:26:47.151513 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:47 crc kubenswrapper[4682]: I1210 11:26:47.684200 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j6bh2"] Dec 10 11:26:47 crc kubenswrapper[4682]: I1210 11:26:47.909405 4682 generic.go:334] "Generic (PLEG): container finished" podID="892faef6-e365-4978-aba2-d68e2f4ec29f" containerID="45ab9269e2dbfa41070ac45a8ef68ac0dd493368c641147bef39bf0e20480684" exitCode=0 Dec 10 11:26:47 crc kubenswrapper[4682]: I1210 11:26:47.909598 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j6bh2" event={"ID":"892faef6-e365-4978-aba2-d68e2f4ec29f","Type":"ContainerDied","Data":"45ab9269e2dbfa41070ac45a8ef68ac0dd493368c641147bef39bf0e20480684"} Dec 10 11:26:47 crc kubenswrapper[4682]: I1210 11:26:47.910010 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j6bh2" event={"ID":"892faef6-e365-4978-aba2-d68e2f4ec29f","Type":"ContainerStarted","Data":"cc9990c04e0009901c16cdbd2dad86dd2fb336b5be4271262ed0539732b75255"} Dec 10 11:26:47 crc kubenswrapper[4682]: I1210 11:26:47.911746 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:26:49 crc kubenswrapper[4682]: I1210 11:26:49.928742 4682 generic.go:334] "Generic (PLEG): container finished" podID="892faef6-e365-4978-aba2-d68e2f4ec29f" containerID="6eca96da48d8bd940702dedb4b92ec7bb51ccb00fca5253df2f5c793bf992c70" exitCode=0 Dec 10 11:26:49 crc kubenswrapper[4682]: I1210 11:26:49.928825 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j6bh2" event={"ID":"892faef6-e365-4978-aba2-d68e2f4ec29f","Type":"ContainerDied","Data":"6eca96da48d8bd940702dedb4b92ec7bb51ccb00fca5253df2f5c793bf992c70"} Dec 10 11:26:51 crc kubenswrapper[4682]: I1210 11:26:51.956305 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j6bh2" event={"ID":"892faef6-e365-4978-aba2-d68e2f4ec29f","Type":"ContainerStarted","Data":"34f5c6f6e73a09976ffff7e2d225e46efb30a01aed9a1fc187ef880441b1419b"} Dec 10 11:26:51 crc kubenswrapper[4682]: I1210 11:26:51.979409 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-j6bh2" podStartSLOduration=2.493704744 podStartE2EDuration="5.979387789s" podCreationTimestamp="2025-12-10 11:26:46 +0000 UTC" firstStartedPulling="2025-12-10 11:26:47.911523281 +0000 UTC m=+2488.231734031" lastFinishedPulling="2025-12-10 11:26:51.397206326 +0000 UTC m=+2491.717417076" observedRunningTime="2025-12-10 11:26:51.975309448 +0000 UTC m=+2492.295520218" watchObservedRunningTime="2025-12-10 11:26:51.979387789 +0000 UTC m=+2492.299598549" Dec 10 11:26:53 crc kubenswrapper[4682]: I1210 11:26:53.381054 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:26:53 crc kubenswrapper[4682]: E1210 11:26:53.381750 4682 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:26:57 crc kubenswrapper[4682]: I1210 11:26:57.151920 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:57 crc kubenswrapper[4682]: I1210 11:26:57.152439 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:57 crc kubenswrapper[4682]: I1210 11:26:57.200368 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:57 crc kubenswrapper[4682]: E1210 11:26:57.383821 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:26:58 crc kubenswrapper[4682]: I1210 11:26:58.075018 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:26:58 crc kubenswrapper[4682]: I1210 11:26:58.135432 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j6bh2"] Dec 10 11:26:58 crc kubenswrapper[4682]: E1210 11:26:58.477783 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:26:58 crc kubenswrapper[4682]: E1210 11:26:58.478197 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:26:58 crc kubenswrapper[4682]: E1210 11:26:58.478350 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:26:58 crc kubenswrapper[4682]: E1210 11:26:58.480252 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:27:00 crc kubenswrapper[4682]: I1210 11:27:00.021567 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-j6bh2" podUID="892faef6-e365-4978-aba2-d68e2f4ec29f" containerName="registry-server" containerID="cri-o://34f5c6f6e73a09976ffff7e2d225e46efb30a01aed9a1fc187ef880441b1419b" gracePeriod=2 Dec 10 11:27:01 crc kubenswrapper[4682]: I1210 11:27:01.033188 4682 generic.go:334] "Generic (PLEG): container finished" podID="892faef6-e365-4978-aba2-d68e2f4ec29f" containerID="34f5c6f6e73a09976ffff7e2d225e46efb30a01aed9a1fc187ef880441b1419b" exitCode=0 Dec 10 11:27:01 crc kubenswrapper[4682]: I1210 11:27:01.033276 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j6bh2" event={"ID":"892faef6-e365-4978-aba2-d68e2f4ec29f","Type":"ContainerDied","Data":"34f5c6f6e73a09976ffff7e2d225e46efb30a01aed9a1fc187ef880441b1419b"} Dec 10 11:27:01 crc kubenswrapper[4682]: I1210 11:27:01.141388 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:27:01 crc kubenswrapper[4682]: I1210 11:27:01.253054 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2v5rv\" (UniqueName: \"kubernetes.io/projected/892faef6-e365-4978-aba2-d68e2f4ec29f-kube-api-access-2v5rv\") pod \"892faef6-e365-4978-aba2-d68e2f4ec29f\" (UID: \"892faef6-e365-4978-aba2-d68e2f4ec29f\") " Dec 10 11:27:01 crc kubenswrapper[4682]: I1210 11:27:01.253255 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/892faef6-e365-4978-aba2-d68e2f4ec29f-utilities\") pod \"892faef6-e365-4978-aba2-d68e2f4ec29f\" (UID: \"892faef6-e365-4978-aba2-d68e2f4ec29f\") " Dec 10 11:27:01 crc kubenswrapper[4682]: I1210 11:27:01.253302 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/892faef6-e365-4978-aba2-d68e2f4ec29f-catalog-content\") pod \"892faef6-e365-4978-aba2-d68e2f4ec29f\" (UID: \"892faef6-e365-4978-aba2-d68e2f4ec29f\") " Dec 10 11:27:01 crc kubenswrapper[4682]: I1210 11:27:01.255922 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/892faef6-e365-4978-aba2-d68e2f4ec29f-utilities" (OuterVolumeSpecName: "utilities") pod "892faef6-e365-4978-aba2-d68e2f4ec29f" (UID: "892faef6-e365-4978-aba2-d68e2f4ec29f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:27:01 crc kubenswrapper[4682]: I1210 11:27:01.263486 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/892faef6-e365-4978-aba2-d68e2f4ec29f-kube-api-access-2v5rv" (OuterVolumeSpecName: "kube-api-access-2v5rv") pod "892faef6-e365-4978-aba2-d68e2f4ec29f" (UID: "892faef6-e365-4978-aba2-d68e2f4ec29f"). InnerVolumeSpecName "kube-api-access-2v5rv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:27:01 crc kubenswrapper[4682]: I1210 11:27:01.303009 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/892faef6-e365-4978-aba2-d68e2f4ec29f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "892faef6-e365-4978-aba2-d68e2f4ec29f" (UID: "892faef6-e365-4978-aba2-d68e2f4ec29f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:27:01 crc kubenswrapper[4682]: I1210 11:27:01.355753 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2v5rv\" (UniqueName: \"kubernetes.io/projected/892faef6-e365-4978-aba2-d68e2f4ec29f-kube-api-access-2v5rv\") on node \"crc\" DevicePath \"\"" Dec 10 11:27:01 crc kubenswrapper[4682]: I1210 11:27:01.355780 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/892faef6-e365-4978-aba2-d68e2f4ec29f-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:27:01 crc kubenswrapper[4682]: I1210 11:27:01.355790 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/892faef6-e365-4978-aba2-d68e2f4ec29f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:27:02 crc kubenswrapper[4682]: I1210 11:27:02.046672 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j6bh2" event={"ID":"892faef6-e365-4978-aba2-d68e2f4ec29f","Type":"ContainerDied","Data":"cc9990c04e0009901c16cdbd2dad86dd2fb336b5be4271262ed0539732b75255"} Dec 10 11:27:02 crc kubenswrapper[4682]: I1210 11:27:02.046781 4682 scope.go:117] "RemoveContainer" containerID="34f5c6f6e73a09976ffff7e2d225e46efb30a01aed9a1fc187ef880441b1419b" Dec 10 11:27:02 crc kubenswrapper[4682]: I1210 11:27:02.046781 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-j6bh2" Dec 10 11:27:02 crc kubenswrapper[4682]: I1210 11:27:02.075916 4682 scope.go:117] "RemoveContainer" containerID="6eca96da48d8bd940702dedb4b92ec7bb51ccb00fca5253df2f5c793bf992c70" Dec 10 11:27:02 crc kubenswrapper[4682]: I1210 11:27:02.093628 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j6bh2"] Dec 10 11:27:02 crc kubenswrapper[4682]: I1210 11:27:02.102607 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-j6bh2"] Dec 10 11:27:02 crc kubenswrapper[4682]: I1210 11:27:02.108984 4682 scope.go:117] "RemoveContainer" containerID="45ab9269e2dbfa41070ac45a8ef68ac0dd493368c641147bef39bf0e20480684" Dec 10 11:27:02 crc kubenswrapper[4682]: I1210 11:27:02.392870 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="892faef6-e365-4978-aba2-d68e2f4ec29f" path="/var/lib/kubelet/pods/892faef6-e365-4978-aba2-d68e2f4ec29f/volumes" Dec 10 11:27:08 crc kubenswrapper[4682]: I1210 11:27:08.380818 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:27:08 crc kubenswrapper[4682]: E1210 11:27:08.381673 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:27:10 crc kubenswrapper[4682]: E1210 11:27:10.391679 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:27:11 crc kubenswrapper[4682]: E1210 11:27:11.504978 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:27:11 crc kubenswrapper[4682]: E1210 11:27:11.505081 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:27:11 crc kubenswrapper[4682]: E1210 11:27:11.505348 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:27:11 crc kubenswrapper[4682]: E1210 11:27:11.506836 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:27:22 crc kubenswrapper[4682]: E1210 11:27:22.383477 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:27:22 crc kubenswrapper[4682]: E1210 11:27:22.384372 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:27:23 crc kubenswrapper[4682]: I1210 11:27:23.381814 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:27:23 crc kubenswrapper[4682]: E1210 11:27:23.382194 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:27:34 crc kubenswrapper[4682]: E1210 11:27:34.383049 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:27:36 crc kubenswrapper[4682]: E1210 11:27:36.384046 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:27:37 crc kubenswrapper[4682]: I1210 11:27:37.381328 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:27:37 crc kubenswrapper[4682]: E1210 11:27:37.381687 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:27:48 crc kubenswrapper[4682]: I1210 11:27:48.382421 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:27:48 crc kubenswrapper[4682]: E1210 11:27:48.384719 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:27:48 crc kubenswrapper[4682]: E1210 11:27:48.385819 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:27:49 crc kubenswrapper[4682]: E1210 11:27:49.383712 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:28:00 crc kubenswrapper[4682]: I1210 11:28:00.429144 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:28:00 crc kubenswrapper[4682]: E1210 11:28:00.429957 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:28:00 crc kubenswrapper[4682]: E1210 11:28:00.439853 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:28:03 crc kubenswrapper[4682]: E1210 11:28:03.382632 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:28:15 crc kubenswrapper[4682]: I1210 11:28:15.381532 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:28:15 crc kubenswrapper[4682]: E1210 11:28:15.382271 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:28:15 crc kubenswrapper[4682]: E1210 11:28:15.384894 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" 
pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:28:16 crc kubenswrapper[4682]: E1210 11:28:16.383100 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:28:28 crc kubenswrapper[4682]: E1210 11:28:28.383772 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:28:29 crc kubenswrapper[4682]: E1210 11:28:29.384550 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:28:30 crc kubenswrapper[4682]: I1210 11:28:30.396252 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:28:30 crc kubenswrapper[4682]: E1210 11:28:30.397109 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:28:39 crc kubenswrapper[4682]: E1210 11:28:39.383291 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:28:40 crc kubenswrapper[4682]: E1210 11:28:40.389290 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:28:41 crc kubenswrapper[4682]: I1210 11:28:41.380657 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:28:42 crc kubenswrapper[4682]: I1210 11:28:42.154418 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"7d8e07c4a4f6b6557796b65395bdd55b09c3cc6bdb5b67dc125ce5fbb647c41e"} Dec 10 11:28:53 crc kubenswrapper[4682]: E1210 11:28:53.384868 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:28:54 crc kubenswrapper[4682]: E1210 11:28:54.383599 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:29:05 crc kubenswrapper[4682]: E1210 11:29:05.382721 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:29:06 crc kubenswrapper[4682]: E1210 11:29:06.382733 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.438663 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pc87w"] Dec 10 11:29:12 crc kubenswrapper[4682]: E1210 11:29:12.439667 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="892faef6-e365-4978-aba2-d68e2f4ec29f" containerName="extract-utilities" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.439683 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="892faef6-e365-4978-aba2-d68e2f4ec29f" containerName="extract-utilities" Dec 10 11:29:12 crc kubenswrapper[4682]: E1210 11:29:12.439718 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="892faef6-e365-4978-aba2-d68e2f4ec29f" containerName="extract-content" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.439725 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="892faef6-e365-4978-aba2-d68e2f4ec29f" containerName="extract-content" Dec 10 11:29:12 crc kubenswrapper[4682]: E1210 11:29:12.439747 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="892faef6-e365-4978-aba2-d68e2f4ec29f" containerName="registry-server" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.439756 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="892faef6-e365-4978-aba2-d68e2f4ec29f" containerName="registry-server" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.440036 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="892faef6-e365-4978-aba2-d68e2f4ec29f" containerName="registry-server" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.441974 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.466324 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pc87w"] Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.625930 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/708dd885-aee1-4183-8aaf-a564018fb0c1-catalog-content\") pod \"redhat-operators-pc87w\" (UID: \"708dd885-aee1-4183-8aaf-a564018fb0c1\") " pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.626642 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-245n9\" (UniqueName: \"kubernetes.io/projected/708dd885-aee1-4183-8aaf-a564018fb0c1-kube-api-access-245n9\") pod \"redhat-operators-pc87w\" (UID: \"708dd885-aee1-4183-8aaf-a564018fb0c1\") " pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.626729 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/708dd885-aee1-4183-8aaf-a564018fb0c1-utilities\") pod \"redhat-operators-pc87w\" (UID: \"708dd885-aee1-4183-8aaf-a564018fb0c1\") " pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.728275 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/708dd885-aee1-4183-8aaf-a564018fb0c1-utilities\") pod \"redhat-operators-pc87w\" (UID: \"708dd885-aee1-4183-8aaf-a564018fb0c1\") " pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.728406 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/708dd885-aee1-4183-8aaf-a564018fb0c1-catalog-content\") pod \"redhat-operators-pc87w\" (UID: \"708dd885-aee1-4183-8aaf-a564018fb0c1\") " pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.728542 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-245n9\" (UniqueName: \"kubernetes.io/projected/708dd885-aee1-4183-8aaf-a564018fb0c1-kube-api-access-245n9\") pod \"redhat-operators-pc87w\" (UID: \"708dd885-aee1-4183-8aaf-a564018fb0c1\") " pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.728954 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/708dd885-aee1-4183-8aaf-a564018fb0c1-utilities\") pod \"redhat-operators-pc87w\" (UID: \"708dd885-aee1-4183-8aaf-a564018fb0c1\") " pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.728965 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/708dd885-aee1-4183-8aaf-a564018fb0c1-catalog-content\") pod \"redhat-operators-pc87w\" (UID: \"708dd885-aee1-4183-8aaf-a564018fb0c1\") " pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.757626 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-245n9\" (UniqueName: \"kubernetes.io/projected/708dd885-aee1-4183-8aaf-a564018fb0c1-kube-api-access-245n9\") pod \"redhat-operators-pc87w\" (UID: \"708dd885-aee1-4183-8aaf-a564018fb0c1\") " pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:12 crc kubenswrapper[4682]: I1210 11:29:12.764561 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:13 crc kubenswrapper[4682]: I1210 11:29:13.247197 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pc87w"] Dec 10 11:29:13 crc kubenswrapper[4682]: I1210 11:29:13.500189 4682 generic.go:334] "Generic (PLEG): container finished" podID="708dd885-aee1-4183-8aaf-a564018fb0c1" containerID="82569b44777b2baba1ff83e942a442727bf663e4c1a42f8b0005e86c23e46600" exitCode=0 Dec 10 11:29:13 crc kubenswrapper[4682]: I1210 11:29:13.500401 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pc87w" event={"ID":"708dd885-aee1-4183-8aaf-a564018fb0c1","Type":"ContainerDied","Data":"82569b44777b2baba1ff83e942a442727bf663e4c1a42f8b0005e86c23e46600"} Dec 10 11:29:13 crc kubenswrapper[4682]: I1210 11:29:13.500622 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pc87w" event={"ID":"708dd885-aee1-4183-8aaf-a564018fb0c1","Type":"ContainerStarted","Data":"fbf973d73a3024d07291143cd955a3af84eb7cce3f491e42b59e81badd537265"} Dec 10 11:29:15 crc kubenswrapper[4682]: I1210 11:29:15.521794 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pc87w" event={"ID":"708dd885-aee1-4183-8aaf-a564018fb0c1","Type":"ContainerStarted","Data":"24868eda2d64cb202fbca7028670fbeec2d7eddc51eb728af9085190eec83a5a"} Dec 10 11:29:18 crc kubenswrapper[4682]: E1210 11:29:18.392541 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:29:19 crc kubenswrapper[4682]: I1210 11:29:19.571551 4682 generic.go:334] "Generic (PLEG): container finished" podID="708dd885-aee1-4183-8aaf-a564018fb0c1" containerID="24868eda2d64cb202fbca7028670fbeec2d7eddc51eb728af9085190eec83a5a" exitCode=0 Dec 10 11:29:19 crc kubenswrapper[4682]: I1210 11:29:19.571709 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pc87w" event={"ID":"708dd885-aee1-4183-8aaf-a564018fb0c1","Type":"ContainerDied","Data":"24868eda2d64cb202fbca7028670fbeec2d7eddc51eb728af9085190eec83a5a"} Dec 10 11:29:20 crc kubenswrapper[4682]: E1210 11:29:20.419644 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:29:20 crc kubenswrapper[4682]: I1210 11:29:20.582094 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pc87w" event={"ID":"708dd885-aee1-4183-8aaf-a564018fb0c1","Type":"ContainerStarted","Data":"ee42cf6e508a1a0e93c1a7162c9325c9ad867092bdf5ea75d92eb729cd9fc8bf"} Dec 
10 11:29:22 crc kubenswrapper[4682]: I1210 11:29:22.764654 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:22 crc kubenswrapper[4682]: I1210 11:29:22.765695 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:23 crc kubenswrapper[4682]: I1210 11:29:23.818946 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pc87w" podUID="708dd885-aee1-4183-8aaf-a564018fb0c1" containerName="registry-server" probeResult="failure" output=< Dec 10 11:29:23 crc kubenswrapper[4682]: timeout: failed to connect service ":50051" within 1s Dec 10 11:29:23 crc kubenswrapper[4682]: > Dec 10 11:29:25 crc kubenswrapper[4682]: I1210 11:29:25.640412 4682 generic.go:334] "Generic (PLEG): container finished" podID="09844e48-f7bc-4c51-9dfa-dcc6daafb27f" containerID="58d50ef1b7d6d032fe99cf893a4d48888b18f8222b35db4bdf9b8ab37f4406f5" exitCode=2 Dec 10 11:29:25 crc kubenswrapper[4682]: I1210 11:29:25.640758 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" event={"ID":"09844e48-f7bc-4c51-9dfa-dcc6daafb27f","Type":"ContainerDied","Data":"58d50ef1b7d6d032fe99cf893a4d48888b18f8222b35db4bdf9b8ab37f4406f5"} Dec 10 11:29:25 crc kubenswrapper[4682]: I1210 11:29:25.671115 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pc87w" podStartSLOduration=7.085836863 podStartE2EDuration="13.671090404s" podCreationTimestamp="2025-12-10 11:29:12 +0000 UTC" firstStartedPulling="2025-12-10 11:29:13.502957047 +0000 UTC m=+2633.823167797" lastFinishedPulling="2025-12-10 11:29:20.088210578 +0000 UTC m=+2640.408421338" observedRunningTime="2025-12-10 11:29:20.614100209 +0000 UTC m=+2640.934310959" watchObservedRunningTime="2025-12-10 11:29:25.671090404 +0000 UTC m=+2645.991301154" Dec 10 11:29:27 crc kubenswrapper[4682]: I1210 11:29:27.241782 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" Dec 10 11:29:27 crc kubenswrapper[4682]: I1210 11:29:27.348630 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-ssh-key\") pod \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\" (UID: \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\") " Dec 10 11:29:27 crc kubenswrapper[4682]: I1210 11:29:27.348992 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scntr\" (UniqueName: \"kubernetes.io/projected/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-kube-api-access-scntr\") pod \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\" (UID: \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\") " Dec 10 11:29:27 crc kubenswrapper[4682]: I1210 11:29:27.349204 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-inventory\") pod \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\" (UID: \"09844e48-f7bc-4c51-9dfa-dcc6daafb27f\") " Dec 10 11:29:27 crc kubenswrapper[4682]: I1210 11:29:27.355219 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-kube-api-access-scntr" (OuterVolumeSpecName: "kube-api-access-scntr") pod "09844e48-f7bc-4c51-9dfa-dcc6daafb27f" (UID: "09844e48-f7bc-4c51-9dfa-dcc6daafb27f"). InnerVolumeSpecName "kube-api-access-scntr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:29:27 crc kubenswrapper[4682]: I1210 11:29:27.379755 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "09844e48-f7bc-4c51-9dfa-dcc6daafb27f" (UID: "09844e48-f7bc-4c51-9dfa-dcc6daafb27f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:29:27 crc kubenswrapper[4682]: I1210 11:29:27.381008 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-inventory" (OuterVolumeSpecName: "inventory") pod "09844e48-f7bc-4c51-9dfa-dcc6daafb27f" (UID: "09844e48-f7bc-4c51-9dfa-dcc6daafb27f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:29:27 crc kubenswrapper[4682]: I1210 11:29:27.451130 4682 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:29:27 crc kubenswrapper[4682]: I1210 11:29:27.451159 4682 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:29:27 crc kubenswrapper[4682]: I1210 11:29:27.451168 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scntr\" (UniqueName: \"kubernetes.io/projected/09844e48-f7bc-4c51-9dfa-dcc6daafb27f-kube-api-access-scntr\") on node \"crc\" DevicePath \"\"" Dec 10 11:29:27 crc kubenswrapper[4682]: I1210 11:29:27.666433 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" event={"ID":"09844e48-f7bc-4c51-9dfa-dcc6daafb27f","Type":"ContainerDied","Data":"fde50bc0ca758d7adda649ff7a1042083734e6bd6056e26780e7636534083b61"} Dec 10 11:29:27 crc kubenswrapper[4682]: I1210 11:29:27.666488 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fde50bc0ca758d7adda649ff7a1042083734e6bd6056e26780e7636534083b61" Dec 10 11:29:27 crc kubenswrapper[4682]: I1210 11:29:27.666543 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm" Dec 10 11:29:29 crc kubenswrapper[4682]: E1210 11:29:29.383494 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:29:32 crc kubenswrapper[4682]: I1210 11:29:32.889312 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:32 crc kubenswrapper[4682]: I1210 11:29:32.947675 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:33 crc kubenswrapper[4682]: I1210 11:29:33.138397 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pc87w"] Dec 10 11:29:33 crc kubenswrapper[4682]: E1210 11:29:33.383524 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:29:34 crc kubenswrapper[4682]: I1210 11:29:34.741653 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pc87w" podUID="708dd885-aee1-4183-8aaf-a564018fb0c1" containerName="registry-server" containerID="cri-o://ee42cf6e508a1a0e93c1a7162c9325c9ad867092bdf5ea75d92eb729cd9fc8bf" gracePeriod=2 Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.354372 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.442751 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/708dd885-aee1-4183-8aaf-a564018fb0c1-catalog-content\") pod \"708dd885-aee1-4183-8aaf-a564018fb0c1\" (UID: \"708dd885-aee1-4183-8aaf-a564018fb0c1\") " Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.442827 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-245n9\" (UniqueName: \"kubernetes.io/projected/708dd885-aee1-4183-8aaf-a564018fb0c1-kube-api-access-245n9\") pod \"708dd885-aee1-4183-8aaf-a564018fb0c1\" (UID: \"708dd885-aee1-4183-8aaf-a564018fb0c1\") " Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.443077 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/708dd885-aee1-4183-8aaf-a564018fb0c1-utilities\") pod \"708dd885-aee1-4183-8aaf-a564018fb0c1\" (UID: \"708dd885-aee1-4183-8aaf-a564018fb0c1\") " Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.444745 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/708dd885-aee1-4183-8aaf-a564018fb0c1-utilities" (OuterVolumeSpecName: "utilities") pod "708dd885-aee1-4183-8aaf-a564018fb0c1" (UID: "708dd885-aee1-4183-8aaf-a564018fb0c1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.448816 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/708dd885-aee1-4183-8aaf-a564018fb0c1-kube-api-access-245n9" (OuterVolumeSpecName: "kube-api-access-245n9") pod "708dd885-aee1-4183-8aaf-a564018fb0c1" (UID: "708dd885-aee1-4183-8aaf-a564018fb0c1"). InnerVolumeSpecName "kube-api-access-245n9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.545223 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/708dd885-aee1-4183-8aaf-a564018fb0c1-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.545265 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-245n9\" (UniqueName: \"kubernetes.io/projected/708dd885-aee1-4183-8aaf-a564018fb0c1-kube-api-access-245n9\") on node \"crc\" DevicePath \"\"" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.556814 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/708dd885-aee1-4183-8aaf-a564018fb0c1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "708dd885-aee1-4183-8aaf-a564018fb0c1" (UID: "708dd885-aee1-4183-8aaf-a564018fb0c1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.649017 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/708dd885-aee1-4183-8aaf-a564018fb0c1-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.759786 4682 generic.go:334] "Generic (PLEG): container finished" podID="708dd885-aee1-4183-8aaf-a564018fb0c1" containerID="ee42cf6e508a1a0e93c1a7162c9325c9ad867092bdf5ea75d92eb729cd9fc8bf" exitCode=0 Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.759842 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pc87w" event={"ID":"708dd885-aee1-4183-8aaf-a564018fb0c1","Type":"ContainerDied","Data":"ee42cf6e508a1a0e93c1a7162c9325c9ad867092bdf5ea75d92eb729cd9fc8bf"} Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.759887 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pc87w" event={"ID":"708dd885-aee1-4183-8aaf-a564018fb0c1","Type":"ContainerDied","Data":"fbf973d73a3024d07291143cd955a3af84eb7cce3f491e42b59e81badd537265"} Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.759901 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pc87w" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.759913 4682 scope.go:117] "RemoveContainer" containerID="ee42cf6e508a1a0e93c1a7162c9325c9ad867092bdf5ea75d92eb729cd9fc8bf" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.793180 4682 scope.go:117] "RemoveContainer" containerID="24868eda2d64cb202fbca7028670fbeec2d7eddc51eb728af9085190eec83a5a" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.833489 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pc87w"] Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.849329 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pc87w"] Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.853161 4682 scope.go:117] "RemoveContainer" containerID="82569b44777b2baba1ff83e942a442727bf663e4c1a42f8b0005e86c23e46600" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.888389 4682 scope.go:117] "RemoveContainer" containerID="ee42cf6e508a1a0e93c1a7162c9325c9ad867092bdf5ea75d92eb729cd9fc8bf" Dec 10 11:29:35 crc kubenswrapper[4682]: E1210 11:29:35.888839 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee42cf6e508a1a0e93c1a7162c9325c9ad867092bdf5ea75d92eb729cd9fc8bf\": container with ID starting with ee42cf6e508a1a0e93c1a7162c9325c9ad867092bdf5ea75d92eb729cd9fc8bf not found: ID does not exist" containerID="ee42cf6e508a1a0e93c1a7162c9325c9ad867092bdf5ea75d92eb729cd9fc8bf" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.888886 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee42cf6e508a1a0e93c1a7162c9325c9ad867092bdf5ea75d92eb729cd9fc8bf"} err="failed to get container status \"ee42cf6e508a1a0e93c1a7162c9325c9ad867092bdf5ea75d92eb729cd9fc8bf\": rpc error: code = NotFound desc = could not find container \"ee42cf6e508a1a0e93c1a7162c9325c9ad867092bdf5ea75d92eb729cd9fc8bf\": container with ID starting with ee42cf6e508a1a0e93c1a7162c9325c9ad867092bdf5ea75d92eb729cd9fc8bf not found: ID does not exist" Dec 10 11:29:35 crc 
kubenswrapper[4682]: I1210 11:29:35.888914 4682 scope.go:117] "RemoveContainer" containerID="24868eda2d64cb202fbca7028670fbeec2d7eddc51eb728af9085190eec83a5a" Dec 10 11:29:35 crc kubenswrapper[4682]: E1210 11:29:35.889221 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24868eda2d64cb202fbca7028670fbeec2d7eddc51eb728af9085190eec83a5a\": container with ID starting with 24868eda2d64cb202fbca7028670fbeec2d7eddc51eb728af9085190eec83a5a not found: ID does not exist" containerID="24868eda2d64cb202fbca7028670fbeec2d7eddc51eb728af9085190eec83a5a" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.889263 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24868eda2d64cb202fbca7028670fbeec2d7eddc51eb728af9085190eec83a5a"} err="failed to get container status \"24868eda2d64cb202fbca7028670fbeec2d7eddc51eb728af9085190eec83a5a\": rpc error: code = NotFound desc = could not find container \"24868eda2d64cb202fbca7028670fbeec2d7eddc51eb728af9085190eec83a5a\": container with ID starting with 24868eda2d64cb202fbca7028670fbeec2d7eddc51eb728af9085190eec83a5a not found: ID does not exist" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.889290 4682 scope.go:117] "RemoveContainer" containerID="82569b44777b2baba1ff83e942a442727bf663e4c1a42f8b0005e86c23e46600" Dec 10 11:29:35 crc kubenswrapper[4682]: E1210 11:29:35.889587 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82569b44777b2baba1ff83e942a442727bf663e4c1a42f8b0005e86c23e46600\": container with ID starting with 82569b44777b2baba1ff83e942a442727bf663e4c1a42f8b0005e86c23e46600 not found: ID does not exist" containerID="82569b44777b2baba1ff83e942a442727bf663e4c1a42f8b0005e86c23e46600" Dec 10 11:29:35 crc kubenswrapper[4682]: I1210 11:29:35.889614 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82569b44777b2baba1ff83e942a442727bf663e4c1a42f8b0005e86c23e46600"} err="failed to get container status \"82569b44777b2baba1ff83e942a442727bf663e4c1a42f8b0005e86c23e46600\": rpc error: code = NotFound desc = could not find container \"82569b44777b2baba1ff83e942a442727bf663e4c1a42f8b0005e86c23e46600\": container with ID starting with 82569b44777b2baba1ff83e942a442727bf663e4c1a42f8b0005e86c23e46600 not found: ID does not exist" Dec 10 11:29:36 crc kubenswrapper[4682]: I1210 11:29:36.392186 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="708dd885-aee1-4183-8aaf-a564018fb0c1" path="/var/lib/kubelet/pods/708dd885-aee1-4183-8aaf-a564018fb0c1/volumes" Dec 10 11:29:40 crc kubenswrapper[4682]: E1210 11:29:40.388807 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.031352 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd"] Dec 10 11:29:45 crc kubenswrapper[4682]: E1210 11:29:45.032290 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="708dd885-aee1-4183-8aaf-a564018fb0c1" containerName="extract-content" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 
11:29:45.032306 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="708dd885-aee1-4183-8aaf-a564018fb0c1" containerName="extract-content" Dec 10 11:29:45 crc kubenswrapper[4682]: E1210 11:29:45.032346 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="708dd885-aee1-4183-8aaf-a564018fb0c1" containerName="extract-utilities" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.032354 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="708dd885-aee1-4183-8aaf-a564018fb0c1" containerName="extract-utilities" Dec 10 11:29:45 crc kubenswrapper[4682]: E1210 11:29:45.032383 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="708dd885-aee1-4183-8aaf-a564018fb0c1" containerName="registry-server" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.032390 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="708dd885-aee1-4183-8aaf-a564018fb0c1" containerName="registry-server" Dec 10 11:29:45 crc kubenswrapper[4682]: E1210 11:29:45.032408 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09844e48-f7bc-4c51-9dfa-dcc6daafb27f" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.032416 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="09844e48-f7bc-4c51-9dfa-dcc6daafb27f" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.032677 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="09844e48-f7bc-4c51-9dfa-dcc6daafb27f" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.032703 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="708dd885-aee1-4183-8aaf-a564018fb0c1" containerName="registry-server" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.033582 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.036832 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-tln2g" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.037311 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.037812 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.038503 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.040935 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd"] Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.067045 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-89qdd\" (UID: \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.067129 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lk8mn\" (UniqueName: \"kubernetes.io/projected/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-kube-api-access-lk8mn\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-89qdd\" (UID: \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.067366 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-89qdd\" (UID: \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.170138 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-89qdd\" (UID: \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.170238 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lk8mn\" (UniqueName: \"kubernetes.io/projected/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-kube-api-access-lk8mn\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-89qdd\" (UID: \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.170323 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-ssh-key\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-89qdd\" (UID: \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.176273 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-89qdd\" (UID: \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.178005 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-89qdd\" (UID: \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.202310 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lk8mn\" (UniqueName: \"kubernetes.io/projected/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-kube-api-access-lk8mn\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-89qdd\" (UID: \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.370350 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" Dec 10 11:29:45 crc kubenswrapper[4682]: I1210 11:29:45.966662 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd"] Dec 10 11:29:46 crc kubenswrapper[4682]: I1210 11:29:46.882007 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" event={"ID":"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330","Type":"ContainerStarted","Data":"12c6e08571e1e3263594d2eaab15f3426d481404ff1a1afbecb8fc10c82f583e"} Dec 10 11:29:47 crc kubenswrapper[4682]: E1210 11:29:47.401308 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:29:47 crc kubenswrapper[4682]: I1210 11:29:47.912872 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" event={"ID":"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330","Type":"ContainerStarted","Data":"ded2035bb56a122c89a4ac4494ca7005bf9912cfcfc505acd36a09f219da05cb"} Dec 10 11:29:47 crc kubenswrapper[4682]: I1210 11:29:47.939061 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" podStartSLOduration=2.158727887 podStartE2EDuration="2.939036562s" podCreationTimestamp="2025-12-10 11:29:45 +0000 UTC" firstStartedPulling="2025-12-10 11:29:45.976627984 +0000 UTC m=+2666.296838734" lastFinishedPulling="2025-12-10 11:29:46.756936639 +0000 UTC m=+2667.077147409" observedRunningTime="2025-12-10 11:29:47.927041791 +0000 UTC m=+2668.247252551" 
watchObservedRunningTime="2025-12-10 11:29:47.939036562 +0000 UTC m=+2668.259247322" Dec 10 11:29:51 crc kubenswrapper[4682]: E1210 11:29:51.382766 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.170965 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d"] Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.174340 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.177543 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.178350 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.213510 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d"] Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.303247 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3877c815-0840-4da9-a28b-6539f8c186a6-config-volume\") pod \"collect-profiles-29422770-rv25d\" (UID: \"3877c815-0840-4da9-a28b-6539f8c186a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.303646 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3877c815-0840-4da9-a28b-6539f8c186a6-secret-volume\") pod \"collect-profiles-29422770-rv25d\" (UID: \"3877c815-0840-4da9-a28b-6539f8c186a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.303801 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qj6dz\" (UniqueName: \"kubernetes.io/projected/3877c815-0840-4da9-a28b-6539f8c186a6-kube-api-access-qj6dz\") pod \"collect-profiles-29422770-rv25d\" (UID: \"3877c815-0840-4da9-a28b-6539f8c186a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.405622 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3877c815-0840-4da9-a28b-6539f8c186a6-secret-volume\") pod \"collect-profiles-29422770-rv25d\" (UID: \"3877c815-0840-4da9-a28b-6539f8c186a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.405693 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qj6dz\" (UniqueName: \"kubernetes.io/projected/3877c815-0840-4da9-a28b-6539f8c186a6-kube-api-access-qj6dz\") pod \"collect-profiles-29422770-rv25d\" 
(UID: \"3877c815-0840-4da9-a28b-6539f8c186a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.405812 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3877c815-0840-4da9-a28b-6539f8c186a6-config-volume\") pod \"collect-profiles-29422770-rv25d\" (UID: \"3877c815-0840-4da9-a28b-6539f8c186a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.406754 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3877c815-0840-4da9-a28b-6539f8c186a6-config-volume\") pod \"collect-profiles-29422770-rv25d\" (UID: \"3877c815-0840-4da9-a28b-6539f8c186a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.417514 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3877c815-0840-4da9-a28b-6539f8c186a6-secret-volume\") pod \"collect-profiles-29422770-rv25d\" (UID: \"3877c815-0840-4da9-a28b-6539f8c186a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.422256 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qj6dz\" (UniqueName: \"kubernetes.io/projected/3877c815-0840-4da9-a28b-6539f8c186a6-kube-api-access-qj6dz\") pod \"collect-profiles-29422770-rv25d\" (UID: \"3877c815-0840-4da9-a28b-6539f8c186a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" Dec 10 11:30:00 crc kubenswrapper[4682]: I1210 11:30:00.508169 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" Dec 10 11:30:01 crc kubenswrapper[4682]: I1210 11:30:01.179227 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d"] Dec 10 11:30:01 crc kubenswrapper[4682]: W1210 11:30:01.181624 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3877c815_0840_4da9_a28b_6539f8c186a6.slice/crio-bbc05c892aa22831f471e94acb291cf3e7f2ace3e8c3e8eb86673c2472e60a6e WatchSource:0}: Error finding container bbc05c892aa22831f471e94acb291cf3e7f2ace3e8c3e8eb86673c2472e60a6e: Status 404 returned error can't find the container with id bbc05c892aa22831f471e94acb291cf3e7f2ace3e8c3e8eb86673c2472e60a6e Dec 10 11:30:02 crc kubenswrapper[4682]: I1210 11:30:02.056076 4682 generic.go:334] "Generic (PLEG): container finished" podID="3877c815-0840-4da9-a28b-6539f8c186a6" containerID="0d95e0e3da956db2990146c0229acf3a18a911823ff307bb9dd6e56b2ea44142" exitCode=0 Dec 10 11:30:02 crc kubenswrapper[4682]: I1210 11:30:02.056190 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" event={"ID":"3877c815-0840-4da9-a28b-6539f8c186a6","Type":"ContainerDied","Data":"0d95e0e3da956db2990146c0229acf3a18a911823ff307bb9dd6e56b2ea44142"} Dec 10 11:30:02 crc kubenswrapper[4682]: I1210 11:30:02.056607 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" event={"ID":"3877c815-0840-4da9-a28b-6539f8c186a6","Type":"ContainerStarted","Data":"bbc05c892aa22831f471e94acb291cf3e7f2ace3e8c3e8eb86673c2472e60a6e"} Dec 10 11:30:02 crc kubenswrapper[4682]: E1210 11:30:02.383194 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:30:03 crc kubenswrapper[4682]: I1210 11:30:03.463179 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" Dec 10 11:30:03 crc kubenswrapper[4682]: I1210 11:30:03.586130 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3877c815-0840-4da9-a28b-6539f8c186a6-secret-volume\") pod \"3877c815-0840-4da9-a28b-6539f8c186a6\" (UID: \"3877c815-0840-4da9-a28b-6539f8c186a6\") " Dec 10 11:30:03 crc kubenswrapper[4682]: I1210 11:30:03.586219 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3877c815-0840-4da9-a28b-6539f8c186a6-config-volume\") pod \"3877c815-0840-4da9-a28b-6539f8c186a6\" (UID: \"3877c815-0840-4da9-a28b-6539f8c186a6\") " Dec 10 11:30:03 crc kubenswrapper[4682]: I1210 11:30:03.587090 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3877c815-0840-4da9-a28b-6539f8c186a6-config-volume" (OuterVolumeSpecName: "config-volume") pod "3877c815-0840-4da9-a28b-6539f8c186a6" (UID: "3877c815-0840-4da9-a28b-6539f8c186a6"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:30:03 crc kubenswrapper[4682]: I1210 11:30:03.587282 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qj6dz\" (UniqueName: \"kubernetes.io/projected/3877c815-0840-4da9-a28b-6539f8c186a6-kube-api-access-qj6dz\") pod \"3877c815-0840-4da9-a28b-6539f8c186a6\" (UID: \"3877c815-0840-4da9-a28b-6539f8c186a6\") " Dec 10 11:30:03 crc kubenswrapper[4682]: I1210 11:30:03.587718 4682 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3877c815-0840-4da9-a28b-6539f8c186a6-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:30:03 crc kubenswrapper[4682]: I1210 11:30:03.594609 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3877c815-0840-4da9-a28b-6539f8c186a6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3877c815-0840-4da9-a28b-6539f8c186a6" (UID: "3877c815-0840-4da9-a28b-6539f8c186a6"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:30:03 crc kubenswrapper[4682]: I1210 11:30:03.594759 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3877c815-0840-4da9-a28b-6539f8c186a6-kube-api-access-qj6dz" (OuterVolumeSpecName: "kube-api-access-qj6dz") pod "3877c815-0840-4da9-a28b-6539f8c186a6" (UID: "3877c815-0840-4da9-a28b-6539f8c186a6"). InnerVolumeSpecName "kube-api-access-qj6dz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:30:03 crc kubenswrapper[4682]: I1210 11:30:03.689159 4682 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3877c815-0840-4da9-a28b-6539f8c186a6-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:30:03 crc kubenswrapper[4682]: I1210 11:30:03.689404 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qj6dz\" (UniqueName: \"kubernetes.io/projected/3877c815-0840-4da9-a28b-6539f8c186a6-kube-api-access-qj6dz\") on node \"crc\" DevicePath \"\"" Dec 10 11:30:04 crc kubenswrapper[4682]: I1210 11:30:04.078075 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" event={"ID":"3877c815-0840-4da9-a28b-6539f8c186a6","Type":"ContainerDied","Data":"bbc05c892aa22831f471e94acb291cf3e7f2ace3e8c3e8eb86673c2472e60a6e"} Dec 10 11:30:04 crc kubenswrapper[4682]: I1210 11:30:04.078128 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bbc05c892aa22831f471e94acb291cf3e7f2ace3e8c3e8eb86673c2472e60a6e" Dec 10 11:30:04 crc kubenswrapper[4682]: I1210 11:30:04.078222 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d" Dec 10 11:30:04 crc kubenswrapper[4682]: I1210 11:30:04.542393 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn"] Dec 10 11:30:04 crc kubenswrapper[4682]: I1210 11:30:04.555261 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422725-7kvsn"] Dec 10 11:30:05 crc kubenswrapper[4682]: E1210 11:30:05.398491 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:30:06 crc kubenswrapper[4682]: I1210 11:30:06.393102 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fe39f56-5b24-4b88-9cd6-02458b68986d" path="/var/lib/kubelet/pods/8fe39f56-5b24-4b88-9cd6-02458b68986d/volumes" Dec 10 11:30:15 crc kubenswrapper[4682]: E1210 11:30:15.384945 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:30:16 crc kubenswrapper[4682]: E1210 11:30:16.383242 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:30:28 crc kubenswrapper[4682]: E1210 11:30:28.384613 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:30:28 crc kubenswrapper[4682]: E1210 11:30:28.385644 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:30:34 crc kubenswrapper[4682]: I1210 11:30:34.825986 4682 scope.go:117] "RemoveContainer" containerID="c5781352bcfb12d8cc4dec9e9b7c7452635310d0efc18510eda644811a0c1dde" Dec 10 11:30:40 crc kubenswrapper[4682]: E1210 11:30:40.390772 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:30:43 crc kubenswrapper[4682]: E1210 11:30:43.383767 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:30:52 crc kubenswrapper[4682]: E1210 11:30:52.383592 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:30:56 crc kubenswrapper[4682]: E1210 11:30:56.383888 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:31:06 crc kubenswrapper[4682]: I1210 11:31:06.478724 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:31:06 crc kubenswrapper[4682]: I1210 11:31:06.479233 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:31:07 crc kubenswrapper[4682]: E1210 11:31:07.384342 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:31:11 crc kubenswrapper[4682]: E1210 11:31:11.382737 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:31:18 crc kubenswrapper[4682]: E1210 11:31:18.383945 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:31:22 crc kubenswrapper[4682]: E1210 11:31:22.382459 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.112808 4682 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-662xw"] Dec 10 11:31:31 crc kubenswrapper[4682]: E1210 11:31:31.115011 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3877c815-0840-4da9-a28b-6539f8c186a6" containerName="collect-profiles" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.115112 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="3877c815-0840-4da9-a28b-6539f8c186a6" containerName="collect-profiles" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.115924 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="3877c815-0840-4da9-a28b-6539f8c186a6" containerName="collect-profiles" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.117616 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.131353 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-662xw"] Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.275638 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttflc\" (UniqueName: \"kubernetes.io/projected/e1063029-22fc-4aff-b7fd-496da43a6cdf-kube-api-access-ttflc\") pod \"redhat-marketplace-662xw\" (UID: \"e1063029-22fc-4aff-b7fd-496da43a6cdf\") " pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.275795 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1063029-22fc-4aff-b7fd-496da43a6cdf-utilities\") pod \"redhat-marketplace-662xw\" (UID: \"e1063029-22fc-4aff-b7fd-496da43a6cdf\") " pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.275864 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1063029-22fc-4aff-b7fd-496da43a6cdf-catalog-content\") pod \"redhat-marketplace-662xw\" (UID: \"e1063029-22fc-4aff-b7fd-496da43a6cdf\") " pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.378329 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1063029-22fc-4aff-b7fd-496da43a6cdf-catalog-content\") pod \"redhat-marketplace-662xw\" (UID: \"e1063029-22fc-4aff-b7fd-496da43a6cdf\") " pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.378551 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttflc\" (UniqueName: \"kubernetes.io/projected/e1063029-22fc-4aff-b7fd-496da43a6cdf-kube-api-access-ttflc\") pod \"redhat-marketplace-662xw\" (UID: \"e1063029-22fc-4aff-b7fd-496da43a6cdf\") " pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.378680 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1063029-22fc-4aff-b7fd-496da43a6cdf-utilities\") pod \"redhat-marketplace-662xw\" (UID: \"e1063029-22fc-4aff-b7fd-496da43a6cdf\") " pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.378964 4682 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1063029-22fc-4aff-b7fd-496da43a6cdf-catalog-content\") pod \"redhat-marketplace-662xw\" (UID: \"e1063029-22fc-4aff-b7fd-496da43a6cdf\") " pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.379160 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1063029-22fc-4aff-b7fd-496da43a6cdf-utilities\") pod \"redhat-marketplace-662xw\" (UID: \"e1063029-22fc-4aff-b7fd-496da43a6cdf\") " pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.396431 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttflc\" (UniqueName: \"kubernetes.io/projected/e1063029-22fc-4aff-b7fd-496da43a6cdf-kube-api-access-ttflc\") pod \"redhat-marketplace-662xw\" (UID: \"e1063029-22fc-4aff-b7fd-496da43a6cdf\") " pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.450864 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:31 crc kubenswrapper[4682]: I1210 11:31:31.955002 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-662xw"] Dec 10 11:31:32 crc kubenswrapper[4682]: I1210 11:31:32.943342 4682 generic.go:334] "Generic (PLEG): container finished" podID="e1063029-22fc-4aff-b7fd-496da43a6cdf" containerID="fb94aec79ca26d3bbd142331d0451ed452268ed94128481eec9e1361c5cf3e58" exitCode=0 Dec 10 11:31:32 crc kubenswrapper[4682]: I1210 11:31:32.943411 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-662xw" event={"ID":"e1063029-22fc-4aff-b7fd-496da43a6cdf","Type":"ContainerDied","Data":"fb94aec79ca26d3bbd142331d0451ed452268ed94128481eec9e1361c5cf3e58"} Dec 10 11:31:32 crc kubenswrapper[4682]: I1210 11:31:32.943798 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-662xw" event={"ID":"e1063029-22fc-4aff-b7fd-496da43a6cdf","Type":"ContainerStarted","Data":"5450e2d81f81681e31e36492429cab854cbd539fdde74151cf44d439bd12e1cb"} Dec 10 11:31:33 crc kubenswrapper[4682]: E1210 11:31:33.382558 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:31:33 crc kubenswrapper[4682]: E1210 11:31:33.383539 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:31:33 crc kubenswrapper[4682]: I1210 11:31:33.956175 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-662xw" event={"ID":"e1063029-22fc-4aff-b7fd-496da43a6cdf","Type":"ContainerStarted","Data":"ee78cccee721f9fec7eb7299994b278c7420d420a9c870d79c5cb88f21e49e2b"} Dec 10 11:31:34 crc kubenswrapper[4682]: I1210 
11:31:34.966923 4682 generic.go:334] "Generic (PLEG): container finished" podID="e1063029-22fc-4aff-b7fd-496da43a6cdf" containerID="ee78cccee721f9fec7eb7299994b278c7420d420a9c870d79c5cb88f21e49e2b" exitCode=0 Dec 10 11:31:34 crc kubenswrapper[4682]: I1210 11:31:34.967138 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-662xw" event={"ID":"e1063029-22fc-4aff-b7fd-496da43a6cdf","Type":"ContainerDied","Data":"ee78cccee721f9fec7eb7299994b278c7420d420a9c870d79c5cb88f21e49e2b"} Dec 10 11:31:35 crc kubenswrapper[4682]: I1210 11:31:35.986147 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-662xw" event={"ID":"e1063029-22fc-4aff-b7fd-496da43a6cdf","Type":"ContainerStarted","Data":"ab08bb7310f1a5a668fd79dff0496bc6e44edd618dbd93ff4e822ea15221f532"} Dec 10 11:31:36 crc kubenswrapper[4682]: I1210 11:31:36.004741 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-662xw" podStartSLOduration=2.535012945 podStartE2EDuration="5.004708773s" podCreationTimestamp="2025-12-10 11:31:31 +0000 UTC" firstStartedPulling="2025-12-10 11:31:32.948135531 +0000 UTC m=+2773.268346321" lastFinishedPulling="2025-12-10 11:31:35.417831399 +0000 UTC m=+2775.738042149" observedRunningTime="2025-12-10 11:31:36.003656979 +0000 UTC m=+2776.323867729" watchObservedRunningTime="2025-12-10 11:31:36.004708773 +0000 UTC m=+2776.324919523" Dec 10 11:31:36 crc kubenswrapper[4682]: I1210 11:31:36.478512 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:31:36 crc kubenswrapper[4682]: I1210 11:31:36.478594 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:31:41 crc kubenswrapper[4682]: I1210 11:31:41.451541 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:41 crc kubenswrapper[4682]: I1210 11:31:41.452121 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:41 crc kubenswrapper[4682]: I1210 11:31:41.507698 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:42 crc kubenswrapper[4682]: I1210 11:31:42.130119 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:42 crc kubenswrapper[4682]: I1210 11:31:42.192505 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-662xw"] Dec 10 11:31:44 crc kubenswrapper[4682]: I1210 11:31:44.099336 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-662xw" podUID="e1063029-22fc-4aff-b7fd-496da43a6cdf" containerName="registry-server" containerID="cri-o://ab08bb7310f1a5a668fd79dff0496bc6e44edd618dbd93ff4e822ea15221f532" gracePeriod=2 Dec 10 11:31:44 crc 
kubenswrapper[4682]: I1210 11:31:44.642412 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:44 crc kubenswrapper[4682]: I1210 11:31:44.898728 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1063029-22fc-4aff-b7fd-496da43a6cdf-utilities\") pod \"e1063029-22fc-4aff-b7fd-496da43a6cdf\" (UID: \"e1063029-22fc-4aff-b7fd-496da43a6cdf\") " Dec 10 11:31:44 crc kubenswrapper[4682]: I1210 11:31:44.898888 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1063029-22fc-4aff-b7fd-496da43a6cdf-catalog-content\") pod \"e1063029-22fc-4aff-b7fd-496da43a6cdf\" (UID: \"e1063029-22fc-4aff-b7fd-496da43a6cdf\") " Dec 10 11:31:44 crc kubenswrapper[4682]: I1210 11:31:44.899005 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttflc\" (UniqueName: \"kubernetes.io/projected/e1063029-22fc-4aff-b7fd-496da43a6cdf-kube-api-access-ttflc\") pod \"e1063029-22fc-4aff-b7fd-496da43a6cdf\" (UID: \"e1063029-22fc-4aff-b7fd-496da43a6cdf\") " Dec 10 11:31:44 crc kubenswrapper[4682]: I1210 11:31:44.900502 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e1063029-22fc-4aff-b7fd-496da43a6cdf-utilities" (OuterVolumeSpecName: "utilities") pod "e1063029-22fc-4aff-b7fd-496da43a6cdf" (UID: "e1063029-22fc-4aff-b7fd-496da43a6cdf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:31:44 crc kubenswrapper[4682]: I1210 11:31:44.902034 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1063029-22fc-4aff-b7fd-496da43a6cdf-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:31:44 crc kubenswrapper[4682]: I1210 11:31:44.907987 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1063029-22fc-4aff-b7fd-496da43a6cdf-kube-api-access-ttflc" (OuterVolumeSpecName: "kube-api-access-ttflc") pod "e1063029-22fc-4aff-b7fd-496da43a6cdf" (UID: "e1063029-22fc-4aff-b7fd-496da43a6cdf"). InnerVolumeSpecName "kube-api-access-ttflc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:31:44 crc kubenswrapper[4682]: I1210 11:31:44.923943 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e1063029-22fc-4aff-b7fd-496da43a6cdf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e1063029-22fc-4aff-b7fd-496da43a6cdf" (UID: "e1063029-22fc-4aff-b7fd-496da43a6cdf"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.004932 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1063029-22fc-4aff-b7fd-496da43a6cdf-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.004971 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttflc\" (UniqueName: \"kubernetes.io/projected/e1063029-22fc-4aff-b7fd-496da43a6cdf-kube-api-access-ttflc\") on node \"crc\" DevicePath \"\"" Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.110800 4682 generic.go:334] "Generic (PLEG): container finished" podID="e1063029-22fc-4aff-b7fd-496da43a6cdf" containerID="ab08bb7310f1a5a668fd79dff0496bc6e44edd618dbd93ff4e822ea15221f532" exitCode=0 Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.110847 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-662xw" event={"ID":"e1063029-22fc-4aff-b7fd-496da43a6cdf","Type":"ContainerDied","Data":"ab08bb7310f1a5a668fd79dff0496bc6e44edd618dbd93ff4e822ea15221f532"} Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.110879 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-662xw" event={"ID":"e1063029-22fc-4aff-b7fd-496da43a6cdf","Type":"ContainerDied","Data":"5450e2d81f81681e31e36492429cab854cbd539fdde74151cf44d439bd12e1cb"} Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.110897 4682 scope.go:117] "RemoveContainer" containerID="ab08bb7310f1a5a668fd79dff0496bc6e44edd618dbd93ff4e822ea15221f532" Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.110890 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-662xw" Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.132682 4682 scope.go:117] "RemoveContainer" containerID="ee78cccee721f9fec7eb7299994b278c7420d420a9c870d79c5cb88f21e49e2b" Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.167453 4682 scope.go:117] "RemoveContainer" containerID="fb94aec79ca26d3bbd142331d0451ed452268ed94128481eec9e1361c5cf3e58" Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.180285 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-662xw"] Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.192648 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-662xw"] Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.207582 4682 scope.go:117] "RemoveContainer" containerID="ab08bb7310f1a5a668fd79dff0496bc6e44edd618dbd93ff4e822ea15221f532" Dec 10 11:31:45 crc kubenswrapper[4682]: E1210 11:31:45.208110 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab08bb7310f1a5a668fd79dff0496bc6e44edd618dbd93ff4e822ea15221f532\": container with ID starting with ab08bb7310f1a5a668fd79dff0496bc6e44edd618dbd93ff4e822ea15221f532 not found: ID does not exist" containerID="ab08bb7310f1a5a668fd79dff0496bc6e44edd618dbd93ff4e822ea15221f532" Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.208160 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab08bb7310f1a5a668fd79dff0496bc6e44edd618dbd93ff4e822ea15221f532"} err="failed to get container status \"ab08bb7310f1a5a668fd79dff0496bc6e44edd618dbd93ff4e822ea15221f532\": rpc error: code = NotFound desc = could not find container \"ab08bb7310f1a5a668fd79dff0496bc6e44edd618dbd93ff4e822ea15221f532\": container with ID starting with ab08bb7310f1a5a668fd79dff0496bc6e44edd618dbd93ff4e822ea15221f532 not found: ID does not exist" Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.208190 4682 scope.go:117] "RemoveContainer" containerID="ee78cccee721f9fec7eb7299994b278c7420d420a9c870d79c5cb88f21e49e2b" Dec 10 11:31:45 crc kubenswrapper[4682]: E1210 11:31:45.208644 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee78cccee721f9fec7eb7299994b278c7420d420a9c870d79c5cb88f21e49e2b\": container with ID starting with ee78cccee721f9fec7eb7299994b278c7420d420a9c870d79c5cb88f21e49e2b not found: ID does not exist" containerID="ee78cccee721f9fec7eb7299994b278c7420d420a9c870d79c5cb88f21e49e2b" Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.208688 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee78cccee721f9fec7eb7299994b278c7420d420a9c870d79c5cb88f21e49e2b"} err="failed to get container status \"ee78cccee721f9fec7eb7299994b278c7420d420a9c870d79c5cb88f21e49e2b\": rpc error: code = NotFound desc = could not find container \"ee78cccee721f9fec7eb7299994b278c7420d420a9c870d79c5cb88f21e49e2b\": container with ID starting with ee78cccee721f9fec7eb7299994b278c7420d420a9c870d79c5cb88f21e49e2b not found: ID does not exist" Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.208719 4682 scope.go:117] "RemoveContainer" containerID="fb94aec79ca26d3bbd142331d0451ed452268ed94128481eec9e1361c5cf3e58" Dec 10 11:31:45 crc kubenswrapper[4682]: E1210 11:31:45.209209 4682 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"fb94aec79ca26d3bbd142331d0451ed452268ed94128481eec9e1361c5cf3e58\": container with ID starting with fb94aec79ca26d3bbd142331d0451ed452268ed94128481eec9e1361c5cf3e58 not found: ID does not exist" containerID="fb94aec79ca26d3bbd142331d0451ed452268ed94128481eec9e1361c5cf3e58" Dec 10 11:31:45 crc kubenswrapper[4682]: I1210 11:31:45.209283 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb94aec79ca26d3bbd142331d0451ed452268ed94128481eec9e1361c5cf3e58"} err="failed to get container status \"fb94aec79ca26d3bbd142331d0451ed452268ed94128481eec9e1361c5cf3e58\": rpc error: code = NotFound desc = could not find container \"fb94aec79ca26d3bbd142331d0451ed452268ed94128481eec9e1361c5cf3e58\": container with ID starting with fb94aec79ca26d3bbd142331d0451ed452268ed94128481eec9e1361c5cf3e58 not found: ID does not exist" Dec 10 11:31:46 crc kubenswrapper[4682]: I1210 11:31:46.398026 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1063029-22fc-4aff-b7fd-496da43a6cdf" path="/var/lib/kubelet/pods/e1063029-22fc-4aff-b7fd-496da43a6cdf/volumes" Dec 10 11:31:48 crc kubenswrapper[4682]: E1210 11:31:48.385544 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:31:48 crc kubenswrapper[4682]: E1210 11:31:48.385642 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:32:00 crc kubenswrapper[4682]: I1210 11:32:00.391121 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:32:00 crc kubenswrapper[4682]: E1210 11:32:00.499640 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:32:00 crc kubenswrapper[4682]: E1210 11:32:00.499712 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:32:00 crc kubenswrapper[4682]: E1210 11:32:00.499864 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:32:00 crc kubenswrapper[4682]: E1210 11:32:00.501178 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:32:03 crc kubenswrapper[4682]: E1210 11:32:03.385547 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:32:06 crc kubenswrapper[4682]: I1210 11:32:06.478936 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:32:06 crc kubenswrapper[4682]: I1210 11:32:06.479577 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:32:06 crc kubenswrapper[4682]: I1210 11:32:06.479653 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 11:32:06 crc kubenswrapper[4682]: I1210 11:32:06.481033 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7d8e07c4a4f6b6557796b65395bdd55b09c3cc6bdb5b67dc125ce5fbb647c41e"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:32:06 crc kubenswrapper[4682]: I1210 11:32:06.481137 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://7d8e07c4a4f6b6557796b65395bdd55b09c3cc6bdb5b67dc125ce5fbb647c41e" gracePeriod=600 Dec 10 11:32:07 crc kubenswrapper[4682]: I1210 11:32:07.321147 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="7d8e07c4a4f6b6557796b65395bdd55b09c3cc6bdb5b67dc125ce5fbb647c41e" exitCode=0 Dec 10 11:32:07 crc kubenswrapper[4682]: I1210 11:32:07.321222 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"7d8e07c4a4f6b6557796b65395bdd55b09c3cc6bdb5b67dc125ce5fbb647c41e"} Dec 10 11:32:07 crc kubenswrapper[4682]: I1210 11:32:07.321718 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5"} Dec 10 11:32:07 crc kubenswrapper[4682]: I1210 11:32:07.321743 4682 scope.go:117] "RemoveContainer" containerID="c172546b692392aebbd4c1337bf6df0ee8d9a36ddd32ef7955d99225842d61b0" Dec 10 11:32:13 crc kubenswrapper[4682]: E1210 11:32:13.382693 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:32:17 crc kubenswrapper[4682]: E1210 11:32:17.512118 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:32:17 crc kubenswrapper[4682]: E1210 11:32:17.512601 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:32:17 crc kubenswrapper[4682]: E1210 11:32:17.512720 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:32:17 crc kubenswrapper[4682]: E1210 11:32:17.514163 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:32:27 crc kubenswrapper[4682]: E1210 11:32:27.383641 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:32:31 crc kubenswrapper[4682]: E1210 11:32:31.384194 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:32:39 crc kubenswrapper[4682]: E1210 11:32:39.385178 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:32:44 crc kubenswrapper[4682]: E1210 11:32:44.388328 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:32:50 crc kubenswrapper[4682]: E1210 11:32:50.389078 4682 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:32:56 crc kubenswrapper[4682]: E1210 11:32:56.383716 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:33:03 crc kubenswrapper[4682]: E1210 11:33:03.383806 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.405240 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7rc8s"] Dec 10 11:33:10 crc kubenswrapper[4682]: E1210 11:33:10.406561 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1063029-22fc-4aff-b7fd-496da43a6cdf" containerName="extract-content" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.406579 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1063029-22fc-4aff-b7fd-496da43a6cdf" containerName="extract-content" Dec 10 11:33:10 crc kubenswrapper[4682]: E1210 11:33:10.406628 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1063029-22fc-4aff-b7fd-496da43a6cdf" containerName="extract-utilities" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.406636 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1063029-22fc-4aff-b7fd-496da43a6cdf" containerName="extract-utilities" Dec 10 11:33:10 crc kubenswrapper[4682]: E1210 11:33:10.406644 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1063029-22fc-4aff-b7fd-496da43a6cdf" containerName="registry-server" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.406650 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1063029-22fc-4aff-b7fd-496da43a6cdf" containerName="registry-server" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.406953 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1063029-22fc-4aff-b7fd-496da43a6cdf" containerName="registry-server" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.408681 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7rc8s"] Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.408796 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.577422 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsx4z\" (UniqueName: \"kubernetes.io/projected/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-kube-api-access-rsx4z\") pod \"certified-operators-7rc8s\" (UID: \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\") " pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.577593 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-utilities\") pod \"certified-operators-7rc8s\" (UID: \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\") " pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.577706 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-catalog-content\") pod \"certified-operators-7rc8s\" (UID: \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\") " pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.679903 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsx4z\" (UniqueName: \"kubernetes.io/projected/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-kube-api-access-rsx4z\") pod \"certified-operators-7rc8s\" (UID: \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\") " pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.680003 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-utilities\") pod \"certified-operators-7rc8s\" (UID: \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\") " pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.680059 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-catalog-content\") pod \"certified-operators-7rc8s\" (UID: \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\") " pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.680587 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-catalog-content\") pod \"certified-operators-7rc8s\" (UID: \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\") " pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.680791 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-utilities\") pod \"certified-operators-7rc8s\" (UID: \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\") " pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.708190 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsx4z\" (UniqueName: \"kubernetes.io/projected/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-kube-api-access-rsx4z\") pod 
\"certified-operators-7rc8s\" (UID: \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\") " pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:10 crc kubenswrapper[4682]: I1210 11:33:10.737332 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:11 crc kubenswrapper[4682]: E1210 11:33:11.383782 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:33:11 crc kubenswrapper[4682]: I1210 11:33:11.391761 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7rc8s"] Dec 10 11:33:11 crc kubenswrapper[4682]: W1210 11:33:11.397652 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb7d31360_61f8_4ac3_a496_0c4b92f3d5e7.slice/crio-aa908f2fc4217e94b01316632f4e2034b412af181c67f3f6674f0b3a0846340f WatchSource:0}: Error finding container aa908f2fc4217e94b01316632f4e2034b412af181c67f3f6674f0b3a0846340f: Status 404 returned error can't find the container with id aa908f2fc4217e94b01316632f4e2034b412af181c67f3f6674f0b3a0846340f Dec 10 11:33:11 crc kubenswrapper[4682]: I1210 11:33:11.995131 4682 generic.go:334] "Generic (PLEG): container finished" podID="b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" containerID="f7186d274f2dbea1c1b1ee85587e7e3579c3d9bf8056522d04699f60e1753ce2" exitCode=0 Dec 10 11:33:11 crc kubenswrapper[4682]: I1210 11:33:11.995191 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7rc8s" event={"ID":"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7","Type":"ContainerDied","Data":"f7186d274f2dbea1c1b1ee85587e7e3579c3d9bf8056522d04699f60e1753ce2"} Dec 10 11:33:11 crc kubenswrapper[4682]: I1210 11:33:11.995513 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7rc8s" event={"ID":"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7","Type":"ContainerStarted","Data":"aa908f2fc4217e94b01316632f4e2034b412af181c67f3f6674f0b3a0846340f"} Dec 10 11:33:14 crc kubenswrapper[4682]: I1210 11:33:14.018728 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7rc8s" event={"ID":"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7","Type":"ContainerStarted","Data":"24be360cd6e5fbd943a89a3a4bd3e8041e0210b0156c081f92e388e60e118419"} Dec 10 11:33:14 crc kubenswrapper[4682]: E1210 11:33:14.382512 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:33:16 crc kubenswrapper[4682]: I1210 11:33:16.038189 4682 generic.go:334] "Generic (PLEG): container finished" podID="b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" containerID="24be360cd6e5fbd943a89a3a4bd3e8041e0210b0156c081f92e388e60e118419" exitCode=0 Dec 10 11:33:16 crc kubenswrapper[4682]: I1210 11:33:16.038273 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7rc8s" 
event={"ID":"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7","Type":"ContainerDied","Data":"24be360cd6e5fbd943a89a3a4bd3e8041e0210b0156c081f92e388e60e118419"} Dec 10 11:33:16 crc kubenswrapper[4682]: I1210 11:33:16.293716 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-7f7fc58469-rvhd4" podUID="ec3a169e-4679-409e-a778-f88b4972abf8" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Dec 10 11:33:17 crc kubenswrapper[4682]: I1210 11:33:17.050188 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7rc8s" event={"ID":"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7","Type":"ContainerStarted","Data":"fedeb3891d100ad3402a47304d10ce634fa05b289b3c929ea5c076425ac4de52"} Dec 10 11:33:17 crc kubenswrapper[4682]: I1210 11:33:17.074527 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7rc8s" podStartSLOduration=2.52393133 podStartE2EDuration="7.074503228s" podCreationTimestamp="2025-12-10 11:33:10 +0000 UTC" firstStartedPulling="2025-12-10 11:33:11.998562872 +0000 UTC m=+2872.318773622" lastFinishedPulling="2025-12-10 11:33:16.54913477 +0000 UTC m=+2876.869345520" observedRunningTime="2025-12-10 11:33:17.069964715 +0000 UTC m=+2877.390175475" watchObservedRunningTime="2025-12-10 11:33:17.074503228 +0000 UTC m=+2877.394713988" Dec 10 11:33:20 crc kubenswrapper[4682]: I1210 11:33:20.739700 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:20 crc kubenswrapper[4682]: I1210 11:33:20.741125 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:20 crc kubenswrapper[4682]: I1210 11:33:20.817811 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:21 crc kubenswrapper[4682]: I1210 11:33:21.171041 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:21 crc kubenswrapper[4682]: I1210 11:33:21.280937 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7rc8s"] Dec 10 11:33:23 crc kubenswrapper[4682]: I1210 11:33:23.109921 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7rc8s" podUID="b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" containerName="registry-server" containerID="cri-o://fedeb3891d100ad3402a47304d10ce634fa05b289b3c929ea5c076425ac4de52" gracePeriod=2 Dec 10 11:33:24 crc kubenswrapper[4682]: I1210 11:33:24.123230 4682 generic.go:334] "Generic (PLEG): container finished" podID="b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" containerID="fedeb3891d100ad3402a47304d10ce634fa05b289b3c929ea5c076425ac4de52" exitCode=0 Dec 10 11:33:24 crc kubenswrapper[4682]: I1210 11:33:24.123432 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7rc8s" event={"ID":"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7","Type":"ContainerDied","Data":"fedeb3891d100ad3402a47304d10ce634fa05b289b3c929ea5c076425ac4de52"} Dec 10 11:33:24 crc kubenswrapper[4682]: I1210 11:33:24.123542 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7rc8s" 
event={"ID":"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7","Type":"ContainerDied","Data":"aa908f2fc4217e94b01316632f4e2034b412af181c67f3f6674f0b3a0846340f"} Dec 10 11:33:24 crc kubenswrapper[4682]: I1210 11:33:24.123558 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa908f2fc4217e94b01316632f4e2034b412af181c67f3f6674f0b3a0846340f" Dec 10 11:33:24 crc kubenswrapper[4682]: I1210 11:33:24.206845 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:24 crc kubenswrapper[4682]: I1210 11:33:24.337758 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-utilities\") pod \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\" (UID: \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\") " Dec 10 11:33:24 crc kubenswrapper[4682]: I1210 11:33:24.338237 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rsx4z\" (UniqueName: \"kubernetes.io/projected/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-kube-api-access-rsx4z\") pod \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\" (UID: \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\") " Dec 10 11:33:24 crc kubenswrapper[4682]: I1210 11:33:24.338349 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-catalog-content\") pod \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\" (UID: \"b7d31360-61f8-4ac3-a496-0c4b92f3d5e7\") " Dec 10 11:33:24 crc kubenswrapper[4682]: I1210 11:33:24.338779 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-utilities" (OuterVolumeSpecName: "utilities") pod "b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" (UID: "b7d31360-61f8-4ac3-a496-0c4b92f3d5e7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:33:24 crc kubenswrapper[4682]: I1210 11:33:24.344786 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-kube-api-access-rsx4z" (OuterVolumeSpecName: "kube-api-access-rsx4z") pod "b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" (UID: "b7d31360-61f8-4ac3-a496-0c4b92f3d5e7"). InnerVolumeSpecName "kube-api-access-rsx4z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:33:24 crc kubenswrapper[4682]: I1210 11:33:24.348960 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rsx4z\" (UniqueName: \"kubernetes.io/projected/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-kube-api-access-rsx4z\") on node \"crc\" DevicePath \"\"" Dec 10 11:33:24 crc kubenswrapper[4682]: I1210 11:33:24.349000 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:33:24 crc kubenswrapper[4682]: E1210 11:33:24.384311 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:33:24 crc kubenswrapper[4682]: I1210 11:33:24.397385 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" (UID: "b7d31360-61f8-4ac3-a496-0c4b92f3d5e7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:33:24 crc kubenswrapper[4682]: I1210 11:33:24.451639 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:33:25 crc kubenswrapper[4682]: I1210 11:33:25.132129 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7rc8s" Dec 10 11:33:25 crc kubenswrapper[4682]: I1210 11:33:25.163146 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7rc8s"] Dec 10 11:33:25 crc kubenswrapper[4682]: I1210 11:33:25.242447 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7rc8s"] Dec 10 11:33:25 crc kubenswrapper[4682]: E1210 11:33:25.383106 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:33:26 crc kubenswrapper[4682]: I1210 11:33:26.393989 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" path="/var/lib/kubelet/pods/b7d31360-61f8-4ac3-a496-0c4b92f3d5e7/volumes" Dec 10 11:33:36 crc kubenswrapper[4682]: E1210 11:33:36.383883 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:33:37 crc kubenswrapper[4682]: E1210 11:33:37.383383 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:33:49 crc kubenswrapper[4682]: E1210 11:33:49.383665 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:33:49 crc kubenswrapper[4682]: E1210 11:33:49.383915 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:34:03 crc kubenswrapper[4682]: E1210 11:34:03.383331 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:34:04 crc kubenswrapper[4682]: E1210 11:34:04.383707 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 
11:34:06 crc kubenswrapper[4682]: I1210 11:34:06.478537 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:34:06 crc kubenswrapper[4682]: I1210 11:34:06.478794 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:34:15 crc kubenswrapper[4682]: E1210 11:34:15.382593 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:34:15 crc kubenswrapper[4682]: E1210 11:34:15.384438 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:34:26 crc kubenswrapper[4682]: E1210 11:34:26.385376 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:34:30 crc kubenswrapper[4682]: E1210 11:34:30.391192 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:34:36 crc kubenswrapper[4682]: I1210 11:34:36.478560 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:34:36 crc kubenswrapper[4682]: I1210 11:34:36.479103 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:34:41 crc kubenswrapper[4682]: E1210 11:34:41.382997 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:34:42 
crc kubenswrapper[4682]: E1210 11:34:42.382230 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:34:52 crc kubenswrapper[4682]: E1210 11:34:52.382151 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:34:56 crc kubenswrapper[4682]: E1210 11:34:56.383612 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:35:06 crc kubenswrapper[4682]: I1210 11:35:06.478987 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:35:06 crc kubenswrapper[4682]: I1210 11:35:06.479787 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:35:06 crc kubenswrapper[4682]: I1210 11:35:06.479866 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 11:35:06 crc kubenswrapper[4682]: I1210 11:35:06.481053 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:35:06 crc kubenswrapper[4682]: I1210 11:35:06.481160 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" gracePeriod=600 Dec 10 11:35:06 crc kubenswrapper[4682]: E1210 11:35:06.603208 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:35:07 crc kubenswrapper[4682]: I1210 11:35:07.117542 4682 
generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" exitCode=0 Dec 10 11:35:07 crc kubenswrapper[4682]: I1210 11:35:07.117628 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5"} Dec 10 11:35:07 crc kubenswrapper[4682]: I1210 11:35:07.118095 4682 scope.go:117] "RemoveContainer" containerID="7d8e07c4a4f6b6557796b65395bdd55b09c3cc6bdb5b67dc125ce5fbb647c41e" Dec 10 11:35:07 crc kubenswrapper[4682]: I1210 11:35:07.118794 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:35:07 crc kubenswrapper[4682]: E1210 11:35:07.119114 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:35:07 crc kubenswrapper[4682]: E1210 11:35:07.383551 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:35:11 crc kubenswrapper[4682]: E1210 11:35:11.383167 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:35:19 crc kubenswrapper[4682]: I1210 11:35:19.381685 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:35:19 crc kubenswrapper[4682]: E1210 11:35:19.382749 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:35:19 crc kubenswrapper[4682]: E1210 11:35:19.383434 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:35:26 crc kubenswrapper[4682]: E1210 11:35:26.397938 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:35:31 crc kubenswrapper[4682]: I1210 11:35:31.380617 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:35:31 crc kubenswrapper[4682]: E1210 11:35:31.381338 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:35:34 crc kubenswrapper[4682]: E1210 11:35:34.385379 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:35:38 crc kubenswrapper[4682]: E1210 11:35:38.383081 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:35:43 crc kubenswrapper[4682]: I1210 11:35:43.380852 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:35:43 crc kubenswrapper[4682]: E1210 11:35:43.382662 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:35:45 crc kubenswrapper[4682]: E1210 11:35:45.383554 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:35:53 crc kubenswrapper[4682]: E1210 11:35:53.384037 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:35:56 crc kubenswrapper[4682]: E1210 11:35:56.384672 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" 
podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:35:58 crc kubenswrapper[4682]: I1210 11:35:58.381530 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:35:58 crc kubenswrapper[4682]: E1210 11:35:58.382024 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:36:02 crc kubenswrapper[4682]: I1210 11:36:02.760640 4682 generic.go:334] "Generic (PLEG): container finished" podID="8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330" containerID="ded2035bb56a122c89a4ac4494ca7005bf9912cfcfc505acd36a09f219da05cb" exitCode=2 Dec 10 11:36:02 crc kubenswrapper[4682]: I1210 11:36:02.761186 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" event={"ID":"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330","Type":"ContainerDied","Data":"ded2035bb56a122c89a4ac4494ca7005bf9912cfcfc505acd36a09f219da05cb"} Dec 10 11:36:04 crc kubenswrapper[4682]: I1210 11:36:04.263909 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" Dec 10 11:36:04 crc kubenswrapper[4682]: I1210 11:36:04.328920 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lk8mn\" (UniqueName: \"kubernetes.io/projected/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-kube-api-access-lk8mn\") pod \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\" (UID: \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\") " Dec 10 11:36:04 crc kubenswrapper[4682]: I1210 11:36:04.329207 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-ssh-key\") pod \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\" (UID: \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\") " Dec 10 11:36:04 crc kubenswrapper[4682]: I1210 11:36:04.329327 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-inventory\") pod \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\" (UID: \"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330\") " Dec 10 11:36:04 crc kubenswrapper[4682]: I1210 11:36:04.334381 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-kube-api-access-lk8mn" (OuterVolumeSpecName: "kube-api-access-lk8mn") pod "8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330" (UID: "8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330"). InnerVolumeSpecName "kube-api-access-lk8mn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:36:04 crc kubenswrapper[4682]: I1210 11:36:04.357626 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330" (UID: "8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:36:04 crc kubenswrapper[4682]: I1210 11:36:04.358185 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-inventory" (OuterVolumeSpecName: "inventory") pod "8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330" (UID: "8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:36:04 crc kubenswrapper[4682]: I1210 11:36:04.432240 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lk8mn\" (UniqueName: \"kubernetes.io/projected/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-kube-api-access-lk8mn\") on node \"crc\" DevicePath \"\"" Dec 10 11:36:04 crc kubenswrapper[4682]: I1210 11:36:04.432427 4682 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:36:04 crc kubenswrapper[4682]: I1210 11:36:04.432661 4682 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:36:04 crc kubenswrapper[4682]: I1210 11:36:04.781054 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" event={"ID":"8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330","Type":"ContainerDied","Data":"12c6e08571e1e3263594d2eaab15f3426d481404ff1a1afbecb8fc10c82f583e"} Dec 10 11:36:04 crc kubenswrapper[4682]: I1210 11:36:04.781098 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12c6e08571e1e3263594d2eaab15f3426d481404ff1a1afbecb8fc10c82f583e" Dec 10 11:36:04 crc kubenswrapper[4682]: I1210 11:36:04.781183 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-89qdd" Dec 10 11:36:07 crc kubenswrapper[4682]: E1210 11:36:07.384109 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:36:09 crc kubenswrapper[4682]: I1210 11:36:09.381529 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:36:09 crc kubenswrapper[4682]: E1210 11:36:09.382082 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:36:12 crc kubenswrapper[4682]: E1210 11:36:12.383096 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:36:19 crc kubenswrapper[4682]: E1210 11:36:19.384254 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:36:23 crc kubenswrapper[4682]: I1210 11:36:23.380863 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:36:23 crc kubenswrapper[4682]: E1210 11:36:23.381676 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:36:23 crc kubenswrapper[4682]: E1210 11:36:23.382954 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:36:34 crc kubenswrapper[4682]: E1210 11:36:34.383457 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:36:34 crc kubenswrapper[4682]: E1210 
11:36:34.385444 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:36:38 crc kubenswrapper[4682]: I1210 11:36:38.381792 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:36:38 crc kubenswrapper[4682]: E1210 11:36:38.382687 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.033715 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn"] Dec 10 11:36:42 crc kubenswrapper[4682]: E1210 11:36:42.034673 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" containerName="registry-server" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.034687 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" containerName="registry-server" Dec 10 11:36:42 crc kubenswrapper[4682]: E1210 11:36:42.034713 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" containerName="extract-content" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.034721 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" containerName="extract-content" Dec 10 11:36:42 crc kubenswrapper[4682]: E1210 11:36:42.034752 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" containerName="extract-utilities" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.034759 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" containerName="extract-utilities" Dec 10 11:36:42 crc kubenswrapper[4682]: E1210 11:36:42.034772 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.034778 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.035040 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.035070 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7d31360-61f8-4ac3-a496-0c4b92f3d5e7" containerName="registry-server" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.036036 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.038569 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-tln2g" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.042838 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn"] Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.044119 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.044149 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.044545 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.206564 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdc567ce-9075-470e-867a-ffd15f55c152-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn\" (UID: \"bdc567ce-9075-470e-867a-ffd15f55c152\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.207031 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bdc567ce-9075-470e-867a-ffd15f55c152-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn\" (UID: \"bdc567ce-9075-470e-867a-ffd15f55c152\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.207514 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rn77\" (UniqueName: \"kubernetes.io/projected/bdc567ce-9075-470e-867a-ffd15f55c152-kube-api-access-9rn77\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn\" (UID: \"bdc567ce-9075-470e-867a-ffd15f55c152\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.310041 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdc567ce-9075-470e-867a-ffd15f55c152-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn\" (UID: \"bdc567ce-9075-470e-867a-ffd15f55c152\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.310208 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bdc567ce-9075-470e-867a-ffd15f55c152-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn\" (UID: \"bdc567ce-9075-470e-867a-ffd15f55c152\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.310296 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rn77\" (UniqueName: \"kubernetes.io/projected/bdc567ce-9075-470e-867a-ffd15f55c152-kube-api-access-9rn77\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn\" (UID: \"bdc567ce-9075-470e-867a-ffd15f55c152\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.316206 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bdc567ce-9075-470e-867a-ffd15f55c152-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn\" (UID: \"bdc567ce-9075-470e-867a-ffd15f55c152\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.324662 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdc567ce-9075-470e-867a-ffd15f55c152-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn\" (UID: \"bdc567ce-9075-470e-867a-ffd15f55c152\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.327812 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rn77\" (UniqueName: \"kubernetes.io/projected/bdc567ce-9075-470e-867a-ffd15f55c152-kube-api-access-9rn77\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn\" (UID: \"bdc567ce-9075-470e-867a-ffd15f55c152\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.357887 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" Dec 10 11:36:42 crc kubenswrapper[4682]: I1210 11:36:42.935852 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn"] Dec 10 11:36:43 crc kubenswrapper[4682]: I1210 11:36:43.153886 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" event={"ID":"bdc567ce-9075-470e-867a-ffd15f55c152","Type":"ContainerStarted","Data":"b850293d3737adb86ecf036b3d18569b4fbf67be13b8ee9380dcae68940f2f3b"} Dec 10 11:36:44 crc kubenswrapper[4682]: I1210 11:36:44.164452 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" event={"ID":"bdc567ce-9075-470e-867a-ffd15f55c152","Type":"ContainerStarted","Data":"2123c752bac2e2a128c35ec3bfebbbe757ea86ea07699a928358458136bbbf9b"} Dec 10 11:36:44 crc kubenswrapper[4682]: I1210 11:36:44.192971 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" podStartSLOduration=1.699456656 podStartE2EDuration="2.192950101s" podCreationTimestamp="2025-12-10 11:36:42 +0000 UTC" firstStartedPulling="2025-12-10 11:36:42.947850991 +0000 UTC m=+3083.268061761" lastFinishedPulling="2025-12-10 11:36:43.441344456 +0000 UTC m=+3083.761555206" observedRunningTime="2025-12-10 11:36:44.182191993 +0000 UTC m=+3084.502402743" watchObservedRunningTime="2025-12-10 11:36:44.192950101 +0000 UTC m=+3084.513160861" Dec 10 11:36:45 crc kubenswrapper[4682]: E1210 11:36:45.382694 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" 
pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:36:49 crc kubenswrapper[4682]: E1210 11:36:49.394279 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:36:51 crc kubenswrapper[4682]: I1210 11:36:51.380964 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:36:51 crc kubenswrapper[4682]: E1210 11:36:51.381630 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:36:51 crc kubenswrapper[4682]: I1210 11:36:51.740454 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-n2xdm"] Dec 10 11:36:51 crc kubenswrapper[4682]: I1210 11:36:51.743311 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:36:51 crc kubenswrapper[4682]: I1210 11:36:51.757842 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n2xdm"] Dec 10 11:36:51 crc kubenswrapper[4682]: I1210 11:36:51.909957 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkzr4\" (UniqueName: \"kubernetes.io/projected/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-kube-api-access-mkzr4\") pod \"community-operators-n2xdm\" (UID: \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\") " pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:36:51 crc kubenswrapper[4682]: I1210 11:36:51.910091 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-utilities\") pod \"community-operators-n2xdm\" (UID: \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\") " pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:36:51 crc kubenswrapper[4682]: I1210 11:36:51.910235 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-catalog-content\") pod \"community-operators-n2xdm\" (UID: \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\") " pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:36:52 crc kubenswrapper[4682]: I1210 11:36:52.011558 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkzr4\" (UniqueName: \"kubernetes.io/projected/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-kube-api-access-mkzr4\") pod \"community-operators-n2xdm\" (UID: \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\") " pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:36:52 crc kubenswrapper[4682]: I1210 11:36:52.011666 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-utilities\") pod \"community-operators-n2xdm\" (UID: \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\") " pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:36:52 crc kubenswrapper[4682]: I1210 11:36:52.011761 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-catalog-content\") pod \"community-operators-n2xdm\" (UID: \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\") " pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:36:52 crc kubenswrapper[4682]: I1210 11:36:52.012265 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-catalog-content\") pod \"community-operators-n2xdm\" (UID: \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\") " pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:36:52 crc kubenswrapper[4682]: I1210 11:36:52.013024 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-utilities\") pod \"community-operators-n2xdm\" (UID: \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\") " pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:36:52 crc kubenswrapper[4682]: I1210 11:36:52.034246 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkzr4\" (UniqueName: \"kubernetes.io/projected/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-kube-api-access-mkzr4\") pod \"community-operators-n2xdm\" (UID: \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\") " pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:36:52 crc kubenswrapper[4682]: I1210 11:36:52.068074 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:36:52 crc kubenswrapper[4682]: I1210 11:36:52.618706 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n2xdm"] Dec 10 11:36:53 crc kubenswrapper[4682]: I1210 11:36:53.308595 4682 generic.go:334] "Generic (PLEG): container finished" podID="746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" containerID="1365a24170a15889716c9a52969fda6b898e1a97c0fd0ad2902db82f6116e8f1" exitCode=0 Dec 10 11:36:53 crc kubenswrapper[4682]: I1210 11:36:53.308713 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2xdm" event={"ID":"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a","Type":"ContainerDied","Data":"1365a24170a15889716c9a52969fda6b898e1a97c0fd0ad2902db82f6116e8f1"} Dec 10 11:36:53 crc kubenswrapper[4682]: I1210 11:36:53.308943 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2xdm" event={"ID":"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a","Type":"ContainerStarted","Data":"ebe1d1c4573a47d576cc275f6a6904ee92a4842a89c5da28270668f7ff1c07c7"} Dec 10 11:36:55 crc kubenswrapper[4682]: I1210 11:36:55.331393 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2xdm" event={"ID":"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a","Type":"ContainerStarted","Data":"8bc9a84813dec9c6b7e4486f7c3d3cdba9625d1e6e65d2045a088b76b5dfd50f"} Dec 10 11:36:56 crc kubenswrapper[4682]: I1210 11:36:56.346139 4682 generic.go:334] "Generic (PLEG): container finished" podID="746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" containerID="8bc9a84813dec9c6b7e4486f7c3d3cdba9625d1e6e65d2045a088b76b5dfd50f" exitCode=0 Dec 10 11:36:56 crc kubenswrapper[4682]: I1210 11:36:56.346280 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2xdm" event={"ID":"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a","Type":"ContainerDied","Data":"8bc9a84813dec9c6b7e4486f7c3d3cdba9625d1e6e65d2045a088b76b5dfd50f"} Dec 10 11:36:57 crc kubenswrapper[4682]: I1210 11:36:57.358738 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2xdm" event={"ID":"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a","Type":"ContainerStarted","Data":"0012f02d4c6bfde118bbb8a45b6ce2e876f9a783cdc7edffb39fc0c55c629cf4"} Dec 10 11:36:57 crc kubenswrapper[4682]: I1210 11:36:57.375787 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-n2xdm" podStartSLOduration=2.836515533 podStartE2EDuration="6.375772984s" podCreationTimestamp="2025-12-10 11:36:51 +0000 UTC" firstStartedPulling="2025-12-10 11:36:53.311400172 +0000 UTC m=+3093.631610922" lastFinishedPulling="2025-12-10 11:36:56.850657623 +0000 UTC m=+3097.170868373" observedRunningTime="2025-12-10 11:36:57.373493802 +0000 UTC m=+3097.693704552" watchObservedRunningTime="2025-12-10 11:36:57.375772984 +0000 UTC m=+3097.695983724" Dec 10 11:36:59 crc kubenswrapper[4682]: E1210 11:36:59.383215 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:37:02 crc kubenswrapper[4682]: I1210 11:37:02.069222 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:37:02 crc kubenswrapper[4682]: I1210 11:37:02.069632 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:37:02 crc kubenswrapper[4682]: I1210 11:37:02.124418 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:37:02 crc kubenswrapper[4682]: I1210 11:37:02.448561 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:37:02 crc kubenswrapper[4682]: I1210 11:37:02.502806 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n2xdm"] Dec 10 11:37:03 crc kubenswrapper[4682]: E1210 11:37:03.383716 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:37:04 crc kubenswrapper[4682]: I1210 11:37:04.420861 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-n2xdm" podUID="746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" containerName="registry-server" containerID="cri-o://0012f02d4c6bfde118bbb8a45b6ce2e876f9a783cdc7edffb39fc0c55c629cf4" gracePeriod=2 Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.380961 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:37:05 crc kubenswrapper[4682]: E1210 11:37:05.381986 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.382331 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.431335 4682 generic.go:334] "Generic (PLEG): container finished" podID="746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" containerID="0012f02d4c6bfde118bbb8a45b6ce2e876f9a783cdc7edffb39fc0c55c629cf4" exitCode=0 Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.431382 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-n2xdm" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.431386 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2xdm" event={"ID":"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a","Type":"ContainerDied","Data":"0012f02d4c6bfde118bbb8a45b6ce2e876f9a783cdc7edffb39fc0c55c629cf4"} Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.431419 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n2xdm" event={"ID":"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a","Type":"ContainerDied","Data":"ebe1d1c4573a47d576cc275f6a6904ee92a4842a89c5da28270668f7ff1c07c7"} Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.431441 4682 scope.go:117] "RemoveContainer" containerID="0012f02d4c6bfde118bbb8a45b6ce2e876f9a783cdc7edffb39fc0c55c629cf4" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.452118 4682 scope.go:117] "RemoveContainer" containerID="8bc9a84813dec9c6b7e4486f7c3d3cdba9625d1e6e65d2045a088b76b5dfd50f" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.472629 4682 scope.go:117] "RemoveContainer" containerID="1365a24170a15889716c9a52969fda6b898e1a97c0fd0ad2902db82f6116e8f1" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.503457 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkzr4\" (UniqueName: \"kubernetes.io/projected/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-kube-api-access-mkzr4\") pod \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\" (UID: \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\") " Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.503700 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-utilities\") pod \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\" (UID: \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\") " Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.503958 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-catalog-content\") pod \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\" (UID: \"746fe9d4-2d10-4926-9ba0-bb745e3f4e0a\") " Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.505511 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-utilities" (OuterVolumeSpecName: "utilities") pod "746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" (UID: "746fe9d4-2d10-4926-9ba0-bb745e3f4e0a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.510688 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-kube-api-access-mkzr4" (OuterVolumeSpecName: "kube-api-access-mkzr4") pod "746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" (UID: "746fe9d4-2d10-4926-9ba0-bb745e3f4e0a"). InnerVolumeSpecName "kube-api-access-mkzr4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.522861 4682 scope.go:117] "RemoveContainer" containerID="0012f02d4c6bfde118bbb8a45b6ce2e876f9a783cdc7edffb39fc0c55c629cf4" Dec 10 11:37:05 crc kubenswrapper[4682]: E1210 11:37:05.523257 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0012f02d4c6bfde118bbb8a45b6ce2e876f9a783cdc7edffb39fc0c55c629cf4\": container with ID starting with 0012f02d4c6bfde118bbb8a45b6ce2e876f9a783cdc7edffb39fc0c55c629cf4 not found: ID does not exist" containerID="0012f02d4c6bfde118bbb8a45b6ce2e876f9a783cdc7edffb39fc0c55c629cf4" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.523313 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0012f02d4c6bfde118bbb8a45b6ce2e876f9a783cdc7edffb39fc0c55c629cf4"} err="failed to get container status \"0012f02d4c6bfde118bbb8a45b6ce2e876f9a783cdc7edffb39fc0c55c629cf4\": rpc error: code = NotFound desc = could not find container \"0012f02d4c6bfde118bbb8a45b6ce2e876f9a783cdc7edffb39fc0c55c629cf4\": container with ID starting with 0012f02d4c6bfde118bbb8a45b6ce2e876f9a783cdc7edffb39fc0c55c629cf4 not found: ID does not exist" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.523350 4682 scope.go:117] "RemoveContainer" containerID="8bc9a84813dec9c6b7e4486f7c3d3cdba9625d1e6e65d2045a088b76b5dfd50f" Dec 10 11:37:05 crc kubenswrapper[4682]: E1210 11:37:05.523727 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bc9a84813dec9c6b7e4486f7c3d3cdba9625d1e6e65d2045a088b76b5dfd50f\": container with ID starting with 8bc9a84813dec9c6b7e4486f7c3d3cdba9625d1e6e65d2045a088b76b5dfd50f not found: ID does not exist" containerID="8bc9a84813dec9c6b7e4486f7c3d3cdba9625d1e6e65d2045a088b76b5dfd50f" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.523761 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bc9a84813dec9c6b7e4486f7c3d3cdba9625d1e6e65d2045a088b76b5dfd50f"} err="failed to get container status \"8bc9a84813dec9c6b7e4486f7c3d3cdba9625d1e6e65d2045a088b76b5dfd50f\": rpc error: code = NotFound desc = could not find container \"8bc9a84813dec9c6b7e4486f7c3d3cdba9625d1e6e65d2045a088b76b5dfd50f\": container with ID starting with 8bc9a84813dec9c6b7e4486f7c3d3cdba9625d1e6e65d2045a088b76b5dfd50f not found: ID does not exist" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.523782 4682 scope.go:117] "RemoveContainer" containerID="1365a24170a15889716c9a52969fda6b898e1a97c0fd0ad2902db82f6116e8f1" Dec 10 11:37:05 crc kubenswrapper[4682]: E1210 11:37:05.524002 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1365a24170a15889716c9a52969fda6b898e1a97c0fd0ad2902db82f6116e8f1\": container with ID starting with 1365a24170a15889716c9a52969fda6b898e1a97c0fd0ad2902db82f6116e8f1 not found: ID does not exist" containerID="1365a24170a15889716c9a52969fda6b898e1a97c0fd0ad2902db82f6116e8f1" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.524035 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1365a24170a15889716c9a52969fda6b898e1a97c0fd0ad2902db82f6116e8f1"} err="failed to get container status \"1365a24170a15889716c9a52969fda6b898e1a97c0fd0ad2902db82f6116e8f1\": rpc error: code = NotFound desc = could not 
find container \"1365a24170a15889716c9a52969fda6b898e1a97c0fd0ad2902db82f6116e8f1\": container with ID starting with 1365a24170a15889716c9a52969fda6b898e1a97c0fd0ad2902db82f6116e8f1 not found: ID does not exist" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.554148 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" (UID: "746fe9d4-2d10-4926-9ba0-bb745e3f4e0a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.607178 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.607214 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkzr4\" (UniqueName: \"kubernetes.io/projected/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-kube-api-access-mkzr4\") on node \"crc\" DevicePath \"\"" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.607225 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.779171 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n2xdm"] Dec 10 11:37:05 crc kubenswrapper[4682]: I1210 11:37:05.787966 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-n2xdm"] Dec 10 11:37:06 crc kubenswrapper[4682]: I1210 11:37:06.392519 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" path="/var/lib/kubelet/pods/746fe9d4-2d10-4926-9ba0-bb745e3f4e0a/volumes" Dec 10 11:37:13 crc kubenswrapper[4682]: I1210 11:37:13.385316 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:37:13 crc kubenswrapper[4682]: E1210 11:37:13.507994 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:37:13 crc kubenswrapper[4682]: E1210 11:37:13.508056 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:37:13 crc kubenswrapper[4682]: E1210 11:37:13.508199 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:37:13 crc kubenswrapper[4682]: E1210 11:37:13.509392 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:37:14 crc kubenswrapper[4682]: E1210 11:37:14.382860 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:37:19 crc kubenswrapper[4682]: I1210 11:37:19.381709 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:37:19 crc kubenswrapper[4682]: E1210 11:37:19.382577 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:37:25 crc kubenswrapper[4682]: E1210 11:37:25.517145 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:37:25 crc kubenswrapper[4682]: E1210 11:37:25.517567 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:37:25 crc kubenswrapper[4682]: E1210 11:37:25.517683 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:37:25 crc kubenswrapper[4682]: E1210 11:37:25.519384 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:37:26 crc kubenswrapper[4682]: E1210 11:37:26.390302 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:37:34 crc kubenswrapper[4682]: I1210 11:37:34.381182 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:37:34 crc kubenswrapper[4682]: E1210 11:37:34.381883 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:37:38 crc kubenswrapper[4682]: E1210 11:37:38.384659 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:37:41 crc kubenswrapper[4682]: E1210 11:37:41.382385 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:37:47 crc kubenswrapper[4682]: I1210 11:37:47.381688 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:37:47 crc kubenswrapper[4682]: E1210 11:37:47.382651 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:37:52 crc kubenswrapper[4682]: E1210 11:37:52.385357 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:37:53 crc kubenswrapper[4682]: E1210 11:37:53.383267 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:37:59 crc kubenswrapper[4682]: I1210 
11:37:59.380594 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:37:59 crc kubenswrapper[4682]: E1210 11:37:59.381311 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:38:06 crc kubenswrapper[4682]: E1210 11:38:06.383194 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:38:07 crc kubenswrapper[4682]: E1210 11:38:07.383825 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:38:14 crc kubenswrapper[4682]: I1210 11:38:14.381207 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:38:14 crc kubenswrapper[4682]: E1210 11:38:14.381926 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:38:20 crc kubenswrapper[4682]: E1210 11:38:20.390514 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:38:20 crc kubenswrapper[4682]: E1210 11:38:20.393043 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:38:26 crc kubenswrapper[4682]: I1210 11:38:26.381948 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:38:26 crc kubenswrapper[4682]: E1210 11:38:26.382762 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:38:31 crc kubenswrapper[4682]: E1210 11:38:31.383271 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:38:33 crc kubenswrapper[4682]: E1210 11:38:33.382608 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:38:41 crc kubenswrapper[4682]: I1210 11:38:41.382101 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:38:41 crc kubenswrapper[4682]: E1210 11:38:41.383871 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:38:43 crc kubenswrapper[4682]: E1210 11:38:43.383315 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:38:47 crc kubenswrapper[4682]: E1210 11:38:47.383215 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:38:53 crc kubenswrapper[4682]: I1210 11:38:53.381058 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:38:53 crc kubenswrapper[4682]: E1210 11:38:53.382258 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:38:57 crc kubenswrapper[4682]: E1210 11:38:57.383722 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:38:59 crc kubenswrapper[4682]: E1210 
11:38:59.382754 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:39:08 crc kubenswrapper[4682]: I1210 11:39:08.381869 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:39:08 crc kubenswrapper[4682]: E1210 11:39:08.382646 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:39:08 crc kubenswrapper[4682]: E1210 11:39:08.383545 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:39:11 crc kubenswrapper[4682]: E1210 11:39:11.384815 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:39:19 crc kubenswrapper[4682]: E1210 11:39:19.383774 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:39:22 crc kubenswrapper[4682]: I1210 11:39:22.381197 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:39:22 crc kubenswrapper[4682]: E1210 11:39:22.382030 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:39:26 crc kubenswrapper[4682]: E1210 11:39:26.384360 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:39:32 crc kubenswrapper[4682]: E1210 11:39:32.383600 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:39:35 crc kubenswrapper[4682]: I1210 11:39:35.084563 4682 scope.go:117] "RemoveContainer" containerID="24be360cd6e5fbd943a89a3a4bd3e8041e0210b0156c081f92e388e60e118419" Dec 10 11:39:35 crc kubenswrapper[4682]: I1210 11:39:35.137009 4682 scope.go:117] "RemoveContainer" containerID="fedeb3891d100ad3402a47304d10ce634fa05b289b3c929ea5c076425ac4de52" Dec 10 11:39:35 crc kubenswrapper[4682]: I1210 11:39:35.195087 4682 scope.go:117] "RemoveContainer" containerID="f7186d274f2dbea1c1b1ee85587e7e3579c3d9bf8056522d04699f60e1753ce2" Dec 10 11:39:35 crc kubenswrapper[4682]: I1210 11:39:35.381740 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:39:35 crc kubenswrapper[4682]: E1210 11:39:35.382245 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:39:38 crc kubenswrapper[4682]: E1210 11:39:38.385680 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:39:43 crc kubenswrapper[4682]: E1210 11:39:43.383584 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:39:50 crc kubenswrapper[4682]: I1210 11:39:50.390830 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:39:50 crc kubenswrapper[4682]: E1210 11:39:50.391634 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:39:52 crc kubenswrapper[4682]: E1210 11:39:52.383727 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:39:55 crc kubenswrapper[4682]: E1210 11:39:55.394256 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:40:05 crc kubenswrapper[4682]: I1210 11:40:05.381702 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:40:05 crc kubenswrapper[4682]: E1210 11:40:05.382622 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:40:06 crc kubenswrapper[4682]: E1210 11:40:06.382524 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:40:09 crc kubenswrapper[4682]: E1210 11:40:09.383008 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:40:16 crc kubenswrapper[4682]: I1210 11:40:16.382453 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:40:17 crc kubenswrapper[4682]: I1210 11:40:17.374323 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"6e39aa4df01d4732c79541b988dca57f788a157df81d4b776c192d30c6b06276"} Dec 10 11:40:19 crc kubenswrapper[4682]: E1210 11:40:19.382929 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:40:21 crc kubenswrapper[4682]: E1210 11:40:21.383646 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:40:30 crc kubenswrapper[4682]: E1210 11:40:30.389270 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:40:34 crc kubenswrapper[4682]: E1210 11:40:34.385580 4682 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:40:41 crc kubenswrapper[4682]: E1210 11:40:41.383819 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:40:47 crc kubenswrapper[4682]: E1210 11:40:47.382751 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:40:55 crc kubenswrapper[4682]: E1210 11:40:55.383061 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:40:58 crc kubenswrapper[4682]: E1210 11:40:58.382869 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:41:07 crc kubenswrapper[4682]: E1210 11:41:07.382683 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:41:09 crc kubenswrapper[4682]: E1210 11:41:09.382722 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:41:19 crc kubenswrapper[4682]: E1210 11:41:19.384515 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:41:21 crc kubenswrapper[4682]: E1210 11:41:21.383916 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:41:31 crc kubenswrapper[4682]: E1210 11:41:31.382646 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:41:32 crc kubenswrapper[4682]: E1210 11:41:32.382900 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:41:45 crc kubenswrapper[4682]: E1210 11:41:45.383173 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:41:45 crc kubenswrapper[4682]: E1210 11:41:45.383197 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:41:58 crc kubenswrapper[4682]: E1210 11:41:58.383310 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:41:58 crc kubenswrapper[4682]: E1210 11:41:58.383508 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:42:09 crc kubenswrapper[4682]: E1210 11:42:09.383392 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:42:12 crc kubenswrapper[4682]: E1210 11:42:12.383919 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:42:21 crc kubenswrapper[4682]: E1210 11:42:21.385687 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling 
image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:42:26 crc kubenswrapper[4682]: I1210 11:42:26.384601 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:42:26 crc kubenswrapper[4682]: E1210 11:42:26.509126 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:42:26 crc kubenswrapper[4682]: E1210 11:42:26.509203 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:42:26 crc kubenswrapper[4682]: E1210 11:42:26.509365 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoo
t:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:42:26 crc kubenswrapper[4682]: E1210 11:42:26.511355 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:42:36 crc kubenswrapper[4682]: I1210 11:42:36.478895 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:42:36 crc kubenswrapper[4682]: I1210 11:42:36.479719 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:42:36 crc kubenswrapper[4682]: E1210 11:42:36.511456 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:42:36 crc kubenswrapper[4682]: E1210 11:42:36.511776 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:42:36 crc kubenswrapper[4682]: E1210 11:42:36.511903 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:42:36 crc kubenswrapper[4682]: E1210 11:42:36.513202 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:42:38 crc kubenswrapper[4682]: E1210 11:42:38.384522 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.556863 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6vngv"] Dec 10 11:42:41 crc kubenswrapper[4682]: E1210 11:42:41.588264 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" containerName="registry-server" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.588306 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" containerName="registry-server" Dec 10 11:42:41 crc kubenswrapper[4682]: E1210 11:42:41.592391 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" containerName="extract-content" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.592410 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" containerName="extract-content" Dec 10 11:42:41 crc kubenswrapper[4682]: E1210 11:42:41.592537 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" containerName="extract-utilities" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.592551 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" containerName="extract-utilities" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.593483 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="746fe9d4-2d10-4926-9ba0-bb745e3f4e0a" containerName="registry-server" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.604115 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.609912 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vngv"] Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.701094 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-utilities\") pod \"redhat-marketplace-6vngv\" (UID: \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\") " pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.701249 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-catalog-content\") pod \"redhat-marketplace-6vngv\" (UID: \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\") " pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.701663 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mp7fj\" (UniqueName: \"kubernetes.io/projected/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-kube-api-access-mp7fj\") pod \"redhat-marketplace-6vngv\" (UID: \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\") " pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.804449 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-utilities\") pod \"redhat-marketplace-6vngv\" (UID: \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\") " pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.804631 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-catalog-content\") pod \"redhat-marketplace-6vngv\" (UID: \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\") " pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.804791 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mp7fj\" (UniqueName: \"kubernetes.io/projected/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-kube-api-access-mp7fj\") pod \"redhat-marketplace-6vngv\" (UID: \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\") " pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.805081 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-utilities\") pod \"redhat-marketplace-6vngv\" (UID: \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\") " pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.805104 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-catalog-content\") pod \"redhat-marketplace-6vngv\" (UID: \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\") " pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.829796 4682 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-mp7fj\" (UniqueName: \"kubernetes.io/projected/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-kube-api-access-mp7fj\") pod \"redhat-marketplace-6vngv\" (UID: \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\") " pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:41 crc kubenswrapper[4682]: I1210 11:42:41.950583 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:42 crc kubenswrapper[4682]: W1210 11:42:42.470053 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbd4fbf75_ff5b_49a4_87e0_c6d46d5f8daf.slice/crio-c796f67cd8bc18ee36f4a5257c36490e985d93129c945f0f0123da9811fb998c WatchSource:0}: Error finding container c796f67cd8bc18ee36f4a5257c36490e985d93129c945f0f0123da9811fb998c: Status 404 returned error can't find the container with id c796f67cd8bc18ee36f4a5257c36490e985d93129c945f0f0123da9811fb998c Dec 10 11:42:42 crc kubenswrapper[4682]: I1210 11:42:42.480345 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vngv"] Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.362738 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pvmjn"] Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.370517 4682 generic.go:334] "Generic (PLEG): container finished" podID="bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" containerID="5212f0e3d7307fb28c05e625ac5be15d44dbdd905494938f69beb56425ac7350" exitCode=0 Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.374138 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vngv" event={"ID":"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf","Type":"ContainerDied","Data":"5212f0e3d7307fb28c05e625ac5be15d44dbdd905494938f69beb56425ac7350"} Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.374245 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vngv" event={"ID":"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf","Type":"ContainerStarted","Data":"c796f67cd8bc18ee36f4a5257c36490e985d93129c945f0f0123da9811fb998c"} Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.374418 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.377430 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pvmjn"] Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.458916 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c949758c-013e-4b2c-94c3-88d387f6bd7e-utilities\") pod \"redhat-operators-pvmjn\" (UID: \"c949758c-013e-4b2c-94c3-88d387f6bd7e\") " pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.458962 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sj4wn\" (UniqueName: \"kubernetes.io/projected/c949758c-013e-4b2c-94c3-88d387f6bd7e-kube-api-access-sj4wn\") pod \"redhat-operators-pvmjn\" (UID: \"c949758c-013e-4b2c-94c3-88d387f6bd7e\") " pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.459098 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c949758c-013e-4b2c-94c3-88d387f6bd7e-catalog-content\") pod \"redhat-operators-pvmjn\" (UID: \"c949758c-013e-4b2c-94c3-88d387f6bd7e\") " pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.561491 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c949758c-013e-4b2c-94c3-88d387f6bd7e-utilities\") pod \"redhat-operators-pvmjn\" (UID: \"c949758c-013e-4b2c-94c3-88d387f6bd7e\") " pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.561589 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sj4wn\" (UniqueName: \"kubernetes.io/projected/c949758c-013e-4b2c-94c3-88d387f6bd7e-kube-api-access-sj4wn\") pod \"redhat-operators-pvmjn\" (UID: \"c949758c-013e-4b2c-94c3-88d387f6bd7e\") " pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.561794 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c949758c-013e-4b2c-94c3-88d387f6bd7e-catalog-content\") pod \"redhat-operators-pvmjn\" (UID: \"c949758c-013e-4b2c-94c3-88d387f6bd7e\") " pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.562149 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c949758c-013e-4b2c-94c3-88d387f6bd7e-utilities\") pod \"redhat-operators-pvmjn\" (UID: \"c949758c-013e-4b2c-94c3-88d387f6bd7e\") " pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.562503 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c949758c-013e-4b2c-94c3-88d387f6bd7e-catalog-content\") pod \"redhat-operators-pvmjn\" (UID: \"c949758c-013e-4b2c-94c3-88d387f6bd7e\") " pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.595393 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-sj4wn\" (UniqueName: \"kubernetes.io/projected/c949758c-013e-4b2c-94c3-88d387f6bd7e-kube-api-access-sj4wn\") pod \"redhat-operators-pvmjn\" (UID: \"c949758c-013e-4b2c-94c3-88d387f6bd7e\") " pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:42:43 crc kubenswrapper[4682]: I1210 11:42:43.719939 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:42:44 crc kubenswrapper[4682]: W1210 11:42:44.180728 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc949758c_013e_4b2c_94c3_88d387f6bd7e.slice/crio-906d42750dc84574f3f3d9dadcf6fc59b2c0db79cb325317072d34e132a7ea92 WatchSource:0}: Error finding container 906d42750dc84574f3f3d9dadcf6fc59b2c0db79cb325317072d34e132a7ea92: Status 404 returned error can't find the container with id 906d42750dc84574f3f3d9dadcf6fc59b2c0db79cb325317072d34e132a7ea92 Dec 10 11:42:44 crc kubenswrapper[4682]: I1210 11:42:44.182097 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pvmjn"] Dec 10 11:42:44 crc kubenswrapper[4682]: I1210 11:42:44.404340 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvmjn" event={"ID":"c949758c-013e-4b2c-94c3-88d387f6bd7e","Type":"ContainerStarted","Data":"906d42750dc84574f3f3d9dadcf6fc59b2c0db79cb325317072d34e132a7ea92"} Dec 10 11:42:45 crc kubenswrapper[4682]: I1210 11:42:45.435643 4682 generic.go:334] "Generic (PLEG): container finished" podID="c949758c-013e-4b2c-94c3-88d387f6bd7e" containerID="e37282badef8426ec35f9c61fa055bc3524ab0a0268e4849805ecebf1d0de3c8" exitCode=0 Dec 10 11:42:45 crc kubenswrapper[4682]: I1210 11:42:45.436166 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvmjn" event={"ID":"c949758c-013e-4b2c-94c3-88d387f6bd7e","Type":"ContainerDied","Data":"e37282badef8426ec35f9c61fa055bc3524ab0a0268e4849805ecebf1d0de3c8"} Dec 10 11:42:45 crc kubenswrapper[4682]: I1210 11:42:45.441741 4682 generic.go:334] "Generic (PLEG): container finished" podID="bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" containerID="ccb52b8b6be9c8887cc47442bf04f39000f5c8e6528a4ae03351f8c590cda2e2" exitCode=0 Dec 10 11:42:45 crc kubenswrapper[4682]: I1210 11:42:45.442042 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vngv" event={"ID":"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf","Type":"ContainerDied","Data":"ccb52b8b6be9c8887cc47442bf04f39000f5c8e6528a4ae03351f8c590cda2e2"} Dec 10 11:42:47 crc kubenswrapper[4682]: I1210 11:42:47.467434 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vngv" event={"ID":"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf","Type":"ContainerStarted","Data":"c369b0340d36f6131a0f4e075e2c2f6369d61d705ece8e71d3fb8cd3c0ad3c6f"} Dec 10 11:42:47 crc kubenswrapper[4682]: I1210 11:42:47.471215 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvmjn" event={"ID":"c949758c-013e-4b2c-94c3-88d387f6bd7e","Type":"ContainerStarted","Data":"c1db5f4d84a740508e7edf5cf31bca9105108a61b088d5d7204d371aa6598a05"} Dec 10 11:42:47 crc kubenswrapper[4682]: I1210 11:42:47.491534 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6vngv" podStartSLOduration=3.39007434 podStartE2EDuration="6.491514561s" 
podCreationTimestamp="2025-12-10 11:42:41 +0000 UTC" firstStartedPulling="2025-12-10 11:42:43.385992023 +0000 UTC m=+3443.706202823" lastFinishedPulling="2025-12-10 11:42:46.487432254 +0000 UTC m=+3446.807643044" observedRunningTime="2025-12-10 11:42:47.486612818 +0000 UTC m=+3447.806823568" watchObservedRunningTime="2025-12-10 11:42:47.491514561 +0000 UTC m=+3447.811725311" Dec 10 11:42:48 crc kubenswrapper[4682]: E1210 11:42:48.383424 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:42:49 crc kubenswrapper[4682]: I1210 11:42:49.500669 4682 generic.go:334] "Generic (PLEG): container finished" podID="c949758c-013e-4b2c-94c3-88d387f6bd7e" containerID="c1db5f4d84a740508e7edf5cf31bca9105108a61b088d5d7204d371aa6598a05" exitCode=0 Dec 10 11:42:49 crc kubenswrapper[4682]: I1210 11:42:49.500725 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvmjn" event={"ID":"c949758c-013e-4b2c-94c3-88d387f6bd7e","Type":"ContainerDied","Data":"c1db5f4d84a740508e7edf5cf31bca9105108a61b088d5d7204d371aa6598a05"} Dec 10 11:42:51 crc kubenswrapper[4682]: I1210 11:42:51.951033 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:51 crc kubenswrapper[4682]: I1210 11:42:51.951656 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:52 crc kubenswrapper[4682]: I1210 11:42:52.019148 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:52 crc kubenswrapper[4682]: E1210 11:42:52.382385 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:42:52 crc kubenswrapper[4682]: I1210 11:42:52.531347 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvmjn" event={"ID":"c949758c-013e-4b2c-94c3-88d387f6bd7e","Type":"ContainerStarted","Data":"6c5b6ce54366aee2d24103205d75357a90b51a0443bbc7a03049607d41c21453"} Dec 10 11:42:52 crc kubenswrapper[4682]: I1210 11:42:52.556626 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pvmjn" podStartSLOduration=3.588161283 podStartE2EDuration="9.556604913s" podCreationTimestamp="2025-12-10 11:42:43 +0000 UTC" firstStartedPulling="2025-12-10 11:42:45.438748068 +0000 UTC m=+3445.758958818" lastFinishedPulling="2025-12-10 11:42:51.407191658 +0000 UTC m=+3451.727402448" observedRunningTime="2025-12-10 11:42:52.548694345 +0000 UTC m=+3452.868905135" watchObservedRunningTime="2025-12-10 11:42:52.556604913 +0000 UTC m=+3452.876815663" Dec 10 11:42:52 crc kubenswrapper[4682]: I1210 11:42:52.605656 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:53 crc kubenswrapper[4682]: I1210 11:42:53.721238 4682 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:42:53 crc kubenswrapper[4682]: I1210 11:42:53.721653 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:42:54 crc kubenswrapper[4682]: I1210 11:42:54.780395 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pvmjn" podUID="c949758c-013e-4b2c-94c3-88d387f6bd7e" containerName="registry-server" probeResult="failure" output=< Dec 10 11:42:54 crc kubenswrapper[4682]: timeout: failed to connect service ":50051" within 1s Dec 10 11:42:54 crc kubenswrapper[4682]: > Dec 10 11:42:56 crc kubenswrapper[4682]: I1210 11:42:56.737569 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vngv"] Dec 10 11:42:56 crc kubenswrapper[4682]: I1210 11:42:56.738325 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6vngv" podUID="bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" containerName="registry-server" containerID="cri-o://c369b0340d36f6131a0f4e075e2c2f6369d61d705ece8e71d3fb8cd3c0ad3c6f" gracePeriod=2 Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.250167 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.381725 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-utilities\") pod \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\" (UID: \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\") " Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.381848 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-catalog-content\") pod \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\" (UID: \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\") " Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.382105 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mp7fj\" (UniqueName: \"kubernetes.io/projected/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-kube-api-access-mp7fj\") pod \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\" (UID: \"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf\") " Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.382449 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-utilities" (OuterVolumeSpecName: "utilities") pod "bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" (UID: "bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.382576 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.391784 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-kube-api-access-mp7fj" (OuterVolumeSpecName: "kube-api-access-mp7fj") pod "bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" (UID: "bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf"). InnerVolumeSpecName "kube-api-access-mp7fj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.411583 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" (UID: "bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.484047 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mp7fj\" (UniqueName: \"kubernetes.io/projected/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-kube-api-access-mp7fj\") on node \"crc\" DevicePath \"\"" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.484090 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.595256 4682 generic.go:334] "Generic (PLEG): container finished" podID="bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" containerID="c369b0340d36f6131a0f4e075e2c2f6369d61d705ece8e71d3fb8cd3c0ad3c6f" exitCode=0 Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.595307 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vngv" event={"ID":"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf","Type":"ContainerDied","Data":"c369b0340d36f6131a0f4e075e2c2f6369d61d705ece8e71d3fb8cd3c0ad3c6f"} Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.595339 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vngv" event={"ID":"bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf","Type":"ContainerDied","Data":"c796f67cd8bc18ee36f4a5257c36490e985d93129c945f0f0123da9811fb998c"} Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.595346 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6vngv" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.595382 4682 scope.go:117] "RemoveContainer" containerID="c369b0340d36f6131a0f4e075e2c2f6369d61d705ece8e71d3fb8cd3c0ad3c6f" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.626199 4682 scope.go:117] "RemoveContainer" containerID="ccb52b8b6be9c8887cc47442bf04f39000f5c8e6528a4ae03351f8c590cda2e2" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.636251 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vngv"] Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.647964 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vngv"] Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.655530 4682 scope.go:117] "RemoveContainer" containerID="5212f0e3d7307fb28c05e625ac5be15d44dbdd905494938f69beb56425ac7350" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.706685 4682 scope.go:117] "RemoveContainer" containerID="c369b0340d36f6131a0f4e075e2c2f6369d61d705ece8e71d3fb8cd3c0ad3c6f" Dec 10 11:42:57 crc kubenswrapper[4682]: E1210 11:42:57.707236 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c369b0340d36f6131a0f4e075e2c2f6369d61d705ece8e71d3fb8cd3c0ad3c6f\": container with ID starting with c369b0340d36f6131a0f4e075e2c2f6369d61d705ece8e71d3fb8cd3c0ad3c6f not found: ID does not exist" containerID="c369b0340d36f6131a0f4e075e2c2f6369d61d705ece8e71d3fb8cd3c0ad3c6f" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.707294 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c369b0340d36f6131a0f4e075e2c2f6369d61d705ece8e71d3fb8cd3c0ad3c6f"} err="failed to get container status \"c369b0340d36f6131a0f4e075e2c2f6369d61d705ece8e71d3fb8cd3c0ad3c6f\": rpc error: code = NotFound desc = could not find container \"c369b0340d36f6131a0f4e075e2c2f6369d61d705ece8e71d3fb8cd3c0ad3c6f\": container with ID starting with c369b0340d36f6131a0f4e075e2c2f6369d61d705ece8e71d3fb8cd3c0ad3c6f not found: ID does not exist" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.707337 4682 scope.go:117] "RemoveContainer" containerID="ccb52b8b6be9c8887cc47442bf04f39000f5c8e6528a4ae03351f8c590cda2e2" Dec 10 11:42:57 crc kubenswrapper[4682]: E1210 11:42:57.707640 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ccb52b8b6be9c8887cc47442bf04f39000f5c8e6528a4ae03351f8c590cda2e2\": container with ID starting with ccb52b8b6be9c8887cc47442bf04f39000f5c8e6528a4ae03351f8c590cda2e2 not found: ID does not exist" containerID="ccb52b8b6be9c8887cc47442bf04f39000f5c8e6528a4ae03351f8c590cda2e2" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.707673 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ccb52b8b6be9c8887cc47442bf04f39000f5c8e6528a4ae03351f8c590cda2e2"} err="failed to get container status \"ccb52b8b6be9c8887cc47442bf04f39000f5c8e6528a4ae03351f8c590cda2e2\": rpc error: code = NotFound desc = could not find container \"ccb52b8b6be9c8887cc47442bf04f39000f5c8e6528a4ae03351f8c590cda2e2\": container with ID starting with ccb52b8b6be9c8887cc47442bf04f39000f5c8e6528a4ae03351f8c590cda2e2 not found: ID does not exist" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.707693 4682 scope.go:117] "RemoveContainer" 
containerID="5212f0e3d7307fb28c05e625ac5be15d44dbdd905494938f69beb56425ac7350" Dec 10 11:42:57 crc kubenswrapper[4682]: E1210 11:42:57.707969 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5212f0e3d7307fb28c05e625ac5be15d44dbdd905494938f69beb56425ac7350\": container with ID starting with 5212f0e3d7307fb28c05e625ac5be15d44dbdd905494938f69beb56425ac7350 not found: ID does not exist" containerID="5212f0e3d7307fb28c05e625ac5be15d44dbdd905494938f69beb56425ac7350" Dec 10 11:42:57 crc kubenswrapper[4682]: I1210 11:42:57.708007 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5212f0e3d7307fb28c05e625ac5be15d44dbdd905494938f69beb56425ac7350"} err="failed to get container status \"5212f0e3d7307fb28c05e625ac5be15d44dbdd905494938f69beb56425ac7350\": rpc error: code = NotFound desc = could not find container \"5212f0e3d7307fb28c05e625ac5be15d44dbdd905494938f69beb56425ac7350\": container with ID starting with 5212f0e3d7307fb28c05e625ac5be15d44dbdd905494938f69beb56425ac7350 not found: ID does not exist" Dec 10 11:42:58 crc kubenswrapper[4682]: I1210 11:42:58.407377 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" path="/var/lib/kubelet/pods/bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf/volumes" Dec 10 11:42:59 crc kubenswrapper[4682]: E1210 11:42:59.384183 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:43:02 crc kubenswrapper[4682]: I1210 11:43:02.669628 4682 generic.go:334] "Generic (PLEG): container finished" podID="bdc567ce-9075-470e-867a-ffd15f55c152" containerID="2123c752bac2e2a128c35ec3bfebbbe757ea86ea07699a928358458136bbbf9b" exitCode=2 Dec 10 11:43:02 crc kubenswrapper[4682]: I1210 11:43:02.669702 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" event={"ID":"bdc567ce-9075-470e-867a-ffd15f55c152","Type":"ContainerDied","Data":"2123c752bac2e2a128c35ec3bfebbbe757ea86ea07699a928358458136bbbf9b"} Dec 10 11:43:03 crc kubenswrapper[4682]: E1210 11:43:03.385682 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:43:03 crc kubenswrapper[4682]: I1210 11:43:03.790551 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:43:03 crc kubenswrapper[4682]: I1210 11:43:03.867097 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:43:04 crc kubenswrapper[4682]: I1210 11:43:04.027453 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pvmjn"] Dec 10 11:43:04 crc kubenswrapper[4682]: I1210 11:43:04.208682 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" Dec 10 11:43:04 crc kubenswrapper[4682]: I1210 11:43:04.356226 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bdc567ce-9075-470e-867a-ffd15f55c152-ssh-key\") pod \"bdc567ce-9075-470e-867a-ffd15f55c152\" (UID: \"bdc567ce-9075-470e-867a-ffd15f55c152\") " Dec 10 11:43:04 crc kubenswrapper[4682]: I1210 11:43:04.356450 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rn77\" (UniqueName: \"kubernetes.io/projected/bdc567ce-9075-470e-867a-ffd15f55c152-kube-api-access-9rn77\") pod \"bdc567ce-9075-470e-867a-ffd15f55c152\" (UID: \"bdc567ce-9075-470e-867a-ffd15f55c152\") " Dec 10 11:43:04 crc kubenswrapper[4682]: I1210 11:43:04.356744 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdc567ce-9075-470e-867a-ffd15f55c152-inventory\") pod \"bdc567ce-9075-470e-867a-ffd15f55c152\" (UID: \"bdc567ce-9075-470e-867a-ffd15f55c152\") " Dec 10 11:43:04 crc kubenswrapper[4682]: I1210 11:43:04.363752 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdc567ce-9075-470e-867a-ffd15f55c152-kube-api-access-9rn77" (OuterVolumeSpecName: "kube-api-access-9rn77") pod "bdc567ce-9075-470e-867a-ffd15f55c152" (UID: "bdc567ce-9075-470e-867a-ffd15f55c152"). InnerVolumeSpecName "kube-api-access-9rn77". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:43:04 crc kubenswrapper[4682]: I1210 11:43:04.411802 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdc567ce-9075-470e-867a-ffd15f55c152-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bdc567ce-9075-470e-867a-ffd15f55c152" (UID: "bdc567ce-9075-470e-867a-ffd15f55c152"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:43:04 crc kubenswrapper[4682]: I1210 11:43:04.414259 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdc567ce-9075-470e-867a-ffd15f55c152-inventory" (OuterVolumeSpecName: "inventory") pod "bdc567ce-9075-470e-867a-ffd15f55c152" (UID: "bdc567ce-9075-470e-867a-ffd15f55c152"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:43:04 crc kubenswrapper[4682]: I1210 11:43:04.459988 4682 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bdc567ce-9075-470e-867a-ffd15f55c152-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:43:04 crc kubenswrapper[4682]: I1210 11:43:04.460033 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rn77\" (UniqueName: \"kubernetes.io/projected/bdc567ce-9075-470e-867a-ffd15f55c152-kube-api-access-9rn77\") on node \"crc\" DevicePath \"\"" Dec 10 11:43:04 crc kubenswrapper[4682]: I1210 11:43:04.460055 4682 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdc567ce-9075-470e-867a-ffd15f55c152-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:43:04 crc kubenswrapper[4682]: I1210 11:43:04.694695 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" event={"ID":"bdc567ce-9075-470e-867a-ffd15f55c152","Type":"ContainerDied","Data":"b850293d3737adb86ecf036b3d18569b4fbf67be13b8ee9380dcae68940f2f3b"} Dec 10 11:43:04 crc kubenswrapper[4682]: I1210 11:43:04.694741 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b850293d3737adb86ecf036b3d18569b4fbf67be13b8ee9380dcae68940f2f3b" Dec 10 11:43:04 crc kubenswrapper[4682]: I1210 11:43:04.695166 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn" Dec 10 11:43:05 crc kubenswrapper[4682]: I1210 11:43:05.707709 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pvmjn" podUID="c949758c-013e-4b2c-94c3-88d387f6bd7e" containerName="registry-server" containerID="cri-o://6c5b6ce54366aee2d24103205d75357a90b51a0443bbc7a03049607d41c21453" gracePeriod=2 Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.276707 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.425910 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c949758c-013e-4b2c-94c3-88d387f6bd7e-utilities\") pod \"c949758c-013e-4b2c-94c3-88d387f6bd7e\" (UID: \"c949758c-013e-4b2c-94c3-88d387f6bd7e\") " Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.426002 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sj4wn\" (UniqueName: \"kubernetes.io/projected/c949758c-013e-4b2c-94c3-88d387f6bd7e-kube-api-access-sj4wn\") pod \"c949758c-013e-4b2c-94c3-88d387f6bd7e\" (UID: \"c949758c-013e-4b2c-94c3-88d387f6bd7e\") " Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.426267 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c949758c-013e-4b2c-94c3-88d387f6bd7e-catalog-content\") pod \"c949758c-013e-4b2c-94c3-88d387f6bd7e\" (UID: \"c949758c-013e-4b2c-94c3-88d387f6bd7e\") " Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.427130 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c949758c-013e-4b2c-94c3-88d387f6bd7e-utilities" (OuterVolumeSpecName: "utilities") pod "c949758c-013e-4b2c-94c3-88d387f6bd7e" (UID: "c949758c-013e-4b2c-94c3-88d387f6bd7e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.432257 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c949758c-013e-4b2c-94c3-88d387f6bd7e-kube-api-access-sj4wn" (OuterVolumeSpecName: "kube-api-access-sj4wn") pod "c949758c-013e-4b2c-94c3-88d387f6bd7e" (UID: "c949758c-013e-4b2c-94c3-88d387f6bd7e"). InnerVolumeSpecName "kube-api-access-sj4wn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.480552 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.480655 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.529303 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c949758c-013e-4b2c-94c3-88d387f6bd7e-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.529339 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sj4wn\" (UniqueName: \"kubernetes.io/projected/c949758c-013e-4b2c-94c3-88d387f6bd7e-kube-api-access-sj4wn\") on node \"crc\" DevicePath \"\"" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.595992 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c949758c-013e-4b2c-94c3-88d387f6bd7e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c949758c-013e-4b2c-94c3-88d387f6bd7e" (UID: "c949758c-013e-4b2c-94c3-88d387f6bd7e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.631178 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c949758c-013e-4b2c-94c3-88d387f6bd7e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.729930 4682 generic.go:334] "Generic (PLEG): container finished" podID="c949758c-013e-4b2c-94c3-88d387f6bd7e" containerID="6c5b6ce54366aee2d24103205d75357a90b51a0443bbc7a03049607d41c21453" exitCode=0 Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.730038 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pvmjn" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.730072 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvmjn" event={"ID":"c949758c-013e-4b2c-94c3-88d387f6bd7e","Type":"ContainerDied","Data":"6c5b6ce54366aee2d24103205d75357a90b51a0443bbc7a03049607d41c21453"} Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.730549 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvmjn" event={"ID":"c949758c-013e-4b2c-94c3-88d387f6bd7e","Type":"ContainerDied","Data":"906d42750dc84574f3f3d9dadcf6fc59b2c0db79cb325317072d34e132a7ea92"} Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.730583 4682 scope.go:117] "RemoveContainer" containerID="6c5b6ce54366aee2d24103205d75357a90b51a0443bbc7a03049607d41c21453" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.781737 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pvmjn"] Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.787566 4682 scope.go:117] "RemoveContainer" containerID="c1db5f4d84a740508e7edf5cf31bca9105108a61b088d5d7204d371aa6598a05" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.789770 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pvmjn"] Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.812652 4682 scope.go:117] "RemoveContainer" containerID="e37282badef8426ec35f9c61fa055bc3524ab0a0268e4849805ecebf1d0de3c8" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.880139 4682 scope.go:117] "RemoveContainer" containerID="6c5b6ce54366aee2d24103205d75357a90b51a0443bbc7a03049607d41c21453" Dec 10 11:43:06 crc kubenswrapper[4682]: E1210 11:43:06.881006 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c5b6ce54366aee2d24103205d75357a90b51a0443bbc7a03049607d41c21453\": container with ID starting with 6c5b6ce54366aee2d24103205d75357a90b51a0443bbc7a03049607d41c21453 not found: ID does not exist" containerID="6c5b6ce54366aee2d24103205d75357a90b51a0443bbc7a03049607d41c21453" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.881044 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c5b6ce54366aee2d24103205d75357a90b51a0443bbc7a03049607d41c21453"} err="failed to get container status \"6c5b6ce54366aee2d24103205d75357a90b51a0443bbc7a03049607d41c21453\": rpc error: code = NotFound desc = could not find container \"6c5b6ce54366aee2d24103205d75357a90b51a0443bbc7a03049607d41c21453\": container with ID starting with 6c5b6ce54366aee2d24103205d75357a90b51a0443bbc7a03049607d41c21453 not found: ID does not exist" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.881072 4682 scope.go:117] "RemoveContainer" containerID="c1db5f4d84a740508e7edf5cf31bca9105108a61b088d5d7204d371aa6598a05" Dec 10 11:43:06 crc kubenswrapper[4682]: E1210 11:43:06.881916 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1db5f4d84a740508e7edf5cf31bca9105108a61b088d5d7204d371aa6598a05\": container with ID starting with c1db5f4d84a740508e7edf5cf31bca9105108a61b088d5d7204d371aa6598a05 not found: ID does not exist" containerID="c1db5f4d84a740508e7edf5cf31bca9105108a61b088d5d7204d371aa6598a05" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.881959 4682 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1db5f4d84a740508e7edf5cf31bca9105108a61b088d5d7204d371aa6598a05"} err="failed to get container status \"c1db5f4d84a740508e7edf5cf31bca9105108a61b088d5d7204d371aa6598a05\": rpc error: code = NotFound desc = could not find container \"c1db5f4d84a740508e7edf5cf31bca9105108a61b088d5d7204d371aa6598a05\": container with ID starting with c1db5f4d84a740508e7edf5cf31bca9105108a61b088d5d7204d371aa6598a05 not found: ID does not exist" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.881978 4682 scope.go:117] "RemoveContainer" containerID="e37282badef8426ec35f9c61fa055bc3524ab0a0268e4849805ecebf1d0de3c8" Dec 10 11:43:06 crc kubenswrapper[4682]: E1210 11:43:06.885071 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e37282badef8426ec35f9c61fa055bc3524ab0a0268e4849805ecebf1d0de3c8\": container with ID starting with e37282badef8426ec35f9c61fa055bc3524ab0a0268e4849805ecebf1d0de3c8 not found: ID does not exist" containerID="e37282badef8426ec35f9c61fa055bc3524ab0a0268e4849805ecebf1d0de3c8" Dec 10 11:43:06 crc kubenswrapper[4682]: I1210 11:43:06.885110 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e37282badef8426ec35f9c61fa055bc3524ab0a0268e4849805ecebf1d0de3c8"} err="failed to get container status \"e37282badef8426ec35f9c61fa055bc3524ab0a0268e4849805ecebf1d0de3c8\": rpc error: code = NotFound desc = could not find container \"e37282badef8426ec35f9c61fa055bc3524ab0a0268e4849805ecebf1d0de3c8\": container with ID starting with e37282badef8426ec35f9c61fa055bc3524ab0a0268e4849805ecebf1d0de3c8 not found: ID does not exist" Dec 10 11:43:08 crc kubenswrapper[4682]: I1210 11:43:08.414565 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c949758c-013e-4b2c-94c3-88d387f6bd7e" path="/var/lib/kubelet/pods/c949758c-013e-4b2c-94c3-88d387f6bd7e/volumes" Dec 10 11:43:12 crc kubenswrapper[4682]: E1210 11:43:12.385188 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:43:14 crc kubenswrapper[4682]: E1210 11:43:14.385178 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:43:26 crc kubenswrapper[4682]: E1210 11:43:26.385566 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:43:27 crc kubenswrapper[4682]: E1210 11:43:27.383553 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:43:36 crc kubenswrapper[4682]: I1210 11:43:36.479589 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:43:36 crc kubenswrapper[4682]: I1210 11:43:36.480236 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:43:36 crc kubenswrapper[4682]: I1210 11:43:36.480308 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 11:43:36 crc kubenswrapper[4682]: I1210 11:43:36.481524 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6e39aa4df01d4732c79541b988dca57f788a157df81d4b776c192d30c6b06276"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:43:36 crc kubenswrapper[4682]: I1210 11:43:36.481623 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://6e39aa4df01d4732c79541b988dca57f788a157df81d4b776c192d30c6b06276" gracePeriod=600 Dec 10 11:43:37 crc kubenswrapper[4682]: I1210 11:43:37.077273 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="6e39aa4df01d4732c79541b988dca57f788a157df81d4b776c192d30c6b06276" exitCode=0 Dec 10 11:43:37 crc kubenswrapper[4682]: I1210 11:43:37.077394 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"6e39aa4df01d4732c79541b988dca57f788a157df81d4b776c192d30c6b06276"} Dec 10 11:43:37 crc kubenswrapper[4682]: I1210 11:43:37.077716 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a"} Dec 10 11:43:37 crc kubenswrapper[4682]: I1210 11:43:37.077746 4682 scope.go:117] "RemoveContainer" containerID="04c443faa86b799d379b561b931bcf1ff523becd5ca76861109dc59af90925b5" Dec 10 11:43:38 crc kubenswrapper[4682]: E1210 11:43:38.384706 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:43:41 crc kubenswrapper[4682]: E1210 11:43:41.383978 4682 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:43:50 crc kubenswrapper[4682]: E1210 11:43:50.389091 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:43:52 crc kubenswrapper[4682]: E1210 11:43:52.383248 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:44:02 crc kubenswrapper[4682]: E1210 11:44:02.383939 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:44:04 crc kubenswrapper[4682]: E1210 11:44:04.383441 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:44:14 crc kubenswrapper[4682]: E1210 11:44:14.389726 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:44:16 crc kubenswrapper[4682]: E1210 11:44:16.384153 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.042522 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb"] Dec 10 11:44:22 crc kubenswrapper[4682]: E1210 11:44:22.043595 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c949758c-013e-4b2c-94c3-88d387f6bd7e" containerName="extract-utilities" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.043614 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c949758c-013e-4b2c-94c3-88d387f6bd7e" containerName="extract-utilities" Dec 10 11:44:22 crc kubenswrapper[4682]: E1210 11:44:22.043637 4682 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" containerName="extract-content" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.043650 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" containerName="extract-content" Dec 10 11:44:22 crc kubenswrapper[4682]: E1210 11:44:22.043662 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c949758c-013e-4b2c-94c3-88d387f6bd7e" containerName="registry-server" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.043673 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c949758c-013e-4b2c-94c3-88d387f6bd7e" containerName="registry-server" Dec 10 11:44:22 crc kubenswrapper[4682]: E1210 11:44:22.043719 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" containerName="extract-utilities" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.043727 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" containerName="extract-utilities" Dec 10 11:44:22 crc kubenswrapper[4682]: E1210 11:44:22.043745 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" containerName="registry-server" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.043753 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" containerName="registry-server" Dec 10 11:44:22 crc kubenswrapper[4682]: E1210 11:44:22.043768 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdc567ce-9075-470e-867a-ffd15f55c152" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.043777 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdc567ce-9075-470e-867a-ffd15f55c152" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:44:22 crc kubenswrapper[4682]: E1210 11:44:22.043794 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c949758c-013e-4b2c-94c3-88d387f6bd7e" containerName="extract-content" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.043802 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c949758c-013e-4b2c-94c3-88d387f6bd7e" containerName="extract-content" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.044073 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd4fbf75-ff5b-49a4-87e0-c6d46d5f8daf" containerName="registry-server" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.044090 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c949758c-013e-4b2c-94c3-88d387f6bd7e" containerName="registry-server" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.044103 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdc567ce-9075-470e-867a-ffd15f55c152" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.045260 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.048138 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.048646 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-tln2g" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.050984 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.050993 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.057200 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb"] Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.149741 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcpms\" (UniqueName: \"kubernetes.io/projected/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-kube-api-access-wcpms\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb\" (UID: \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.150110 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb\" (UID: \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.150926 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb\" (UID: \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.254066 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb\" (UID: \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.254756 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb\" (UID: \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.255096 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcpms\" (UniqueName: \"kubernetes.io/projected/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-kube-api-access-wcpms\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb\" (UID: \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.260826 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb\" (UID: \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.261014 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb\" (UID: \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.279822 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcpms\" (UniqueName: \"kubernetes.io/projected/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-kube-api-access-wcpms\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb\" (UID: \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.380250 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" Dec 10 11:44:22 crc kubenswrapper[4682]: I1210 11:44:22.891674 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb"] Dec 10 11:44:23 crc kubenswrapper[4682]: I1210 11:44:23.608189 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" event={"ID":"29311a90-82aa-4b3f-a171-f7d45d0b9dc1","Type":"ContainerStarted","Data":"9144f19b1b5020a4a28ac6048d129d2b170c4fc6dbfacd4248ca753d6691e1a6"} Dec 10 11:44:25 crc kubenswrapper[4682]: I1210 11:44:25.632978 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" event={"ID":"29311a90-82aa-4b3f-a171-f7d45d0b9dc1","Type":"ContainerStarted","Data":"e9e505a6cf45222e9c2a503552661348c08de11f3f2c813025c03c3e801da4fb"} Dec 10 11:44:25 crc kubenswrapper[4682]: I1210 11:44:25.660037 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" podStartSLOduration=2.067243484 podStartE2EDuration="3.659996703s" podCreationTimestamp="2025-12-10 11:44:22 +0000 UTC" firstStartedPulling="2025-12-10 11:44:22.897283995 +0000 UTC m=+3543.217494745" lastFinishedPulling="2025-12-10 11:44:24.490037214 +0000 UTC m=+3544.810247964" observedRunningTime="2025-12-10 11:44:25.652038764 +0000 UTC m=+3545.972249524" watchObservedRunningTime="2025-12-10 11:44:25.659996703 +0000 UTC m=+3545.980207453" Dec 10 11:44:28 crc kubenswrapper[4682]: E1210 11:44:28.382536 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:44:28 crc kubenswrapper[4682]: E1210 11:44:28.382640 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:44:30 crc kubenswrapper[4682]: I1210 11:44:30.496674 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5ztff"] Dec 10 11:44:30 crc kubenswrapper[4682]: I1210 11:44:30.499220 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:30 crc kubenswrapper[4682]: I1210 11:44:30.517377 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5ztff"] Dec 10 11:44:30 crc kubenswrapper[4682]: I1210 11:44:30.666330 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqh4f\" (UniqueName: \"kubernetes.io/projected/9e3e5b17-ecad-4090-911d-37d92a72377b-kube-api-access-mqh4f\") pod \"certified-operators-5ztff\" (UID: \"9e3e5b17-ecad-4090-911d-37d92a72377b\") " pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:30 crc kubenswrapper[4682]: I1210 11:44:30.666569 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e3e5b17-ecad-4090-911d-37d92a72377b-catalog-content\") pod \"certified-operators-5ztff\" (UID: \"9e3e5b17-ecad-4090-911d-37d92a72377b\") " pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:30 crc kubenswrapper[4682]: I1210 11:44:30.666615 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e3e5b17-ecad-4090-911d-37d92a72377b-utilities\") pod \"certified-operators-5ztff\" (UID: \"9e3e5b17-ecad-4090-911d-37d92a72377b\") " pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:30 crc kubenswrapper[4682]: I1210 11:44:30.768510 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e3e5b17-ecad-4090-911d-37d92a72377b-catalog-content\") pod \"certified-operators-5ztff\" (UID: \"9e3e5b17-ecad-4090-911d-37d92a72377b\") " pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:30 crc kubenswrapper[4682]: I1210 11:44:30.768582 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e3e5b17-ecad-4090-911d-37d92a72377b-utilities\") pod \"certified-operators-5ztff\" (UID: \"9e3e5b17-ecad-4090-911d-37d92a72377b\") " pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:30 crc kubenswrapper[4682]: I1210 11:44:30.768746 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqh4f\" (UniqueName: \"kubernetes.io/projected/9e3e5b17-ecad-4090-911d-37d92a72377b-kube-api-access-mqh4f\") pod \"certified-operators-5ztff\" (UID: \"9e3e5b17-ecad-4090-911d-37d92a72377b\") " 
pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:30 crc kubenswrapper[4682]: I1210 11:44:30.769133 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e3e5b17-ecad-4090-911d-37d92a72377b-catalog-content\") pod \"certified-operators-5ztff\" (UID: \"9e3e5b17-ecad-4090-911d-37d92a72377b\") " pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:30 crc kubenswrapper[4682]: I1210 11:44:30.769189 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e3e5b17-ecad-4090-911d-37d92a72377b-utilities\") pod \"certified-operators-5ztff\" (UID: \"9e3e5b17-ecad-4090-911d-37d92a72377b\") " pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:30 crc kubenswrapper[4682]: I1210 11:44:30.792328 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqh4f\" (UniqueName: \"kubernetes.io/projected/9e3e5b17-ecad-4090-911d-37d92a72377b-kube-api-access-mqh4f\") pod \"certified-operators-5ztff\" (UID: \"9e3e5b17-ecad-4090-911d-37d92a72377b\") " pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:30 crc kubenswrapper[4682]: I1210 11:44:30.820031 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:31 crc kubenswrapper[4682]: W1210 11:44:31.423231 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e3e5b17_ecad_4090_911d_37d92a72377b.slice/crio-8b1fe0f99f086fa1923946425d8ed55aad7f333e6a3dff2b4e4d4717caf7da67 WatchSource:0}: Error finding container 8b1fe0f99f086fa1923946425d8ed55aad7f333e6a3dff2b4e4d4717caf7da67: Status 404 returned error can't find the container with id 8b1fe0f99f086fa1923946425d8ed55aad7f333e6a3dff2b4e4d4717caf7da67 Dec 10 11:44:31 crc kubenswrapper[4682]: I1210 11:44:31.432523 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5ztff"] Dec 10 11:44:31 crc kubenswrapper[4682]: I1210 11:44:31.700553 4682 generic.go:334] "Generic (PLEG): container finished" podID="9e3e5b17-ecad-4090-911d-37d92a72377b" containerID="cc5d347fa29ae3a6d3b2d957cfb77fe67d8bd7a71ad8a33ec6c1c4c871e45b3c" exitCode=0 Dec 10 11:44:31 crc kubenswrapper[4682]: I1210 11:44:31.700661 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5ztff" event={"ID":"9e3e5b17-ecad-4090-911d-37d92a72377b","Type":"ContainerDied","Data":"cc5d347fa29ae3a6d3b2d957cfb77fe67d8bd7a71ad8a33ec6c1c4c871e45b3c"} Dec 10 11:44:31 crc kubenswrapper[4682]: I1210 11:44:31.700905 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5ztff" event={"ID":"9e3e5b17-ecad-4090-911d-37d92a72377b","Type":"ContainerStarted","Data":"8b1fe0f99f086fa1923946425d8ed55aad7f333e6a3dff2b4e4d4717caf7da67"} Dec 10 11:44:37 crc kubenswrapper[4682]: I1210 11:44:37.765800 4682 generic.go:334] "Generic (PLEG): container finished" podID="9e3e5b17-ecad-4090-911d-37d92a72377b" containerID="7df9a55ccc1228b9e858e793dea0e7c01ad4d526bd5251a31cacbd3a36c4ecbf" exitCode=0 Dec 10 11:44:37 crc kubenswrapper[4682]: I1210 11:44:37.766341 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5ztff" 
event={"ID":"9e3e5b17-ecad-4090-911d-37d92a72377b","Type":"ContainerDied","Data":"7df9a55ccc1228b9e858e793dea0e7c01ad4d526bd5251a31cacbd3a36c4ecbf"} Dec 10 11:44:38 crc kubenswrapper[4682]: I1210 11:44:38.779832 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5ztff" event={"ID":"9e3e5b17-ecad-4090-911d-37d92a72377b","Type":"ContainerStarted","Data":"7a025d5c62401cce6410df76780961ce9b6c57ebc71354179c7d280ddbafa214"} Dec 10 11:44:39 crc kubenswrapper[4682]: E1210 11:44:39.383449 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:44:40 crc kubenswrapper[4682]: I1210 11:44:40.821142 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:40 crc kubenswrapper[4682]: I1210 11:44:40.821205 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:40 crc kubenswrapper[4682]: I1210 11:44:40.895288 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:40 crc kubenswrapper[4682]: I1210 11:44:40.917522 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5ztff" podStartSLOduration=4.393971609 podStartE2EDuration="10.917500586s" podCreationTimestamp="2025-12-10 11:44:30 +0000 UTC" firstStartedPulling="2025-12-10 11:44:31.703726264 +0000 UTC m=+3552.023937014" lastFinishedPulling="2025-12-10 11:44:38.227255201 +0000 UTC m=+3558.547465991" observedRunningTime="2025-12-10 11:44:38.797382669 +0000 UTC m=+3559.117593449" watchObservedRunningTime="2025-12-10 11:44:40.917500586 +0000 UTC m=+3561.237711336" Dec 10 11:44:42 crc kubenswrapper[4682]: E1210 11:44:42.386797 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:44:50 crc kubenswrapper[4682]: I1210 11:44:50.876813 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5ztff" Dec 10 11:44:50 crc kubenswrapper[4682]: I1210 11:44:50.950166 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5ztff"] Dec 10 11:44:50 crc kubenswrapper[4682]: I1210 11:44:50.995557 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xd7ms"] Dec 10 11:44:50 crc kubenswrapper[4682]: I1210 11:44:50.995787 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xd7ms" podUID="170511a5-ad2a-4906-94a0-a712cb687bb9" containerName="registry-server" containerID="cri-o://5995f0421a1f93627c57d982ba2628c0f43fd1512f2f509ffb88172347b9bbf3" gracePeriod=2 Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.733753 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.796481 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgtqv\" (UniqueName: \"kubernetes.io/projected/170511a5-ad2a-4906-94a0-a712cb687bb9-kube-api-access-mgtqv\") pod \"170511a5-ad2a-4906-94a0-a712cb687bb9\" (UID: \"170511a5-ad2a-4906-94a0-a712cb687bb9\") " Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.796600 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/170511a5-ad2a-4906-94a0-a712cb687bb9-utilities\") pod \"170511a5-ad2a-4906-94a0-a712cb687bb9\" (UID: \"170511a5-ad2a-4906-94a0-a712cb687bb9\") " Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.796820 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/170511a5-ad2a-4906-94a0-a712cb687bb9-catalog-content\") pod \"170511a5-ad2a-4906-94a0-a712cb687bb9\" (UID: \"170511a5-ad2a-4906-94a0-a712cb687bb9\") " Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.798744 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/170511a5-ad2a-4906-94a0-a712cb687bb9-utilities" (OuterVolumeSpecName: "utilities") pod "170511a5-ad2a-4906-94a0-a712cb687bb9" (UID: "170511a5-ad2a-4906-94a0-a712cb687bb9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.827384 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/170511a5-ad2a-4906-94a0-a712cb687bb9-kube-api-access-mgtqv" (OuterVolumeSpecName: "kube-api-access-mgtqv") pod "170511a5-ad2a-4906-94a0-a712cb687bb9" (UID: "170511a5-ad2a-4906-94a0-a712cb687bb9"). InnerVolumeSpecName "kube-api-access-mgtqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.866288 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/170511a5-ad2a-4906-94a0-a712cb687bb9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "170511a5-ad2a-4906-94a0-a712cb687bb9" (UID: "170511a5-ad2a-4906-94a0-a712cb687bb9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.898892 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mgtqv\" (UniqueName: \"kubernetes.io/projected/170511a5-ad2a-4906-94a0-a712cb687bb9-kube-api-access-mgtqv\") on node \"crc\" DevicePath \"\"" Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.898938 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/170511a5-ad2a-4906-94a0-a712cb687bb9-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.898950 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/170511a5-ad2a-4906-94a0-a712cb687bb9-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.931017 4682 generic.go:334] "Generic (PLEG): container finished" podID="170511a5-ad2a-4906-94a0-a712cb687bb9" containerID="5995f0421a1f93627c57d982ba2628c0f43fd1512f2f509ffb88172347b9bbf3" exitCode=0 Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.931072 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xd7ms" event={"ID":"170511a5-ad2a-4906-94a0-a712cb687bb9","Type":"ContainerDied","Data":"5995f0421a1f93627c57d982ba2628c0f43fd1512f2f509ffb88172347b9bbf3"} Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.931091 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xd7ms" Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.931125 4682 scope.go:117] "RemoveContainer" containerID="5995f0421a1f93627c57d982ba2628c0f43fd1512f2f509ffb88172347b9bbf3" Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.931098 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xd7ms" event={"ID":"170511a5-ad2a-4906-94a0-a712cb687bb9","Type":"ContainerDied","Data":"f0581389064f81ed75b2d1e166f62ce6e37efacb264d24a4dce6be95aa995d1b"} Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.956658 4682 scope.go:117] "RemoveContainer" containerID="e73dddecd7c52dd3e29aa7b52be7a365cdb9e80c08a719b610ebe1bba39f8eb6" Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.973309 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xd7ms"] Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.983362 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xd7ms"] Dec 10 11:44:51 crc kubenswrapper[4682]: I1210 11:44:51.995040 4682 scope.go:117] "RemoveContainer" containerID="b32e7221752dfb3556a76c60a52146cea04440c074d07e90dcfb7944590af309" Dec 10 11:44:52 crc kubenswrapper[4682]: I1210 11:44:52.028645 4682 scope.go:117] "RemoveContainer" containerID="5995f0421a1f93627c57d982ba2628c0f43fd1512f2f509ffb88172347b9bbf3" Dec 10 11:44:52 crc kubenswrapper[4682]: E1210 11:44:52.029094 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5995f0421a1f93627c57d982ba2628c0f43fd1512f2f509ffb88172347b9bbf3\": container with ID starting with 5995f0421a1f93627c57d982ba2628c0f43fd1512f2f509ffb88172347b9bbf3 not found: ID does not exist" containerID="5995f0421a1f93627c57d982ba2628c0f43fd1512f2f509ffb88172347b9bbf3" Dec 10 11:44:52 crc kubenswrapper[4682]: I1210 11:44:52.029124 
4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5995f0421a1f93627c57d982ba2628c0f43fd1512f2f509ffb88172347b9bbf3"} err="failed to get container status \"5995f0421a1f93627c57d982ba2628c0f43fd1512f2f509ffb88172347b9bbf3\": rpc error: code = NotFound desc = could not find container \"5995f0421a1f93627c57d982ba2628c0f43fd1512f2f509ffb88172347b9bbf3\": container with ID starting with 5995f0421a1f93627c57d982ba2628c0f43fd1512f2f509ffb88172347b9bbf3 not found: ID does not exist" Dec 10 11:44:52 crc kubenswrapper[4682]: I1210 11:44:52.029144 4682 scope.go:117] "RemoveContainer" containerID="e73dddecd7c52dd3e29aa7b52be7a365cdb9e80c08a719b610ebe1bba39f8eb6" Dec 10 11:44:52 crc kubenswrapper[4682]: E1210 11:44:52.029327 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e73dddecd7c52dd3e29aa7b52be7a365cdb9e80c08a719b610ebe1bba39f8eb6\": container with ID starting with e73dddecd7c52dd3e29aa7b52be7a365cdb9e80c08a719b610ebe1bba39f8eb6 not found: ID does not exist" containerID="e73dddecd7c52dd3e29aa7b52be7a365cdb9e80c08a719b610ebe1bba39f8eb6" Dec 10 11:44:52 crc kubenswrapper[4682]: I1210 11:44:52.029345 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e73dddecd7c52dd3e29aa7b52be7a365cdb9e80c08a719b610ebe1bba39f8eb6"} err="failed to get container status \"e73dddecd7c52dd3e29aa7b52be7a365cdb9e80c08a719b610ebe1bba39f8eb6\": rpc error: code = NotFound desc = could not find container \"e73dddecd7c52dd3e29aa7b52be7a365cdb9e80c08a719b610ebe1bba39f8eb6\": container with ID starting with e73dddecd7c52dd3e29aa7b52be7a365cdb9e80c08a719b610ebe1bba39f8eb6 not found: ID does not exist" Dec 10 11:44:52 crc kubenswrapper[4682]: I1210 11:44:52.029358 4682 scope.go:117] "RemoveContainer" containerID="b32e7221752dfb3556a76c60a52146cea04440c074d07e90dcfb7944590af309" Dec 10 11:44:52 crc kubenswrapper[4682]: E1210 11:44:52.029574 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b32e7221752dfb3556a76c60a52146cea04440c074d07e90dcfb7944590af309\": container with ID starting with b32e7221752dfb3556a76c60a52146cea04440c074d07e90dcfb7944590af309 not found: ID does not exist" containerID="b32e7221752dfb3556a76c60a52146cea04440c074d07e90dcfb7944590af309" Dec 10 11:44:52 crc kubenswrapper[4682]: I1210 11:44:52.029594 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b32e7221752dfb3556a76c60a52146cea04440c074d07e90dcfb7944590af309"} err="failed to get container status \"b32e7221752dfb3556a76c60a52146cea04440c074d07e90dcfb7944590af309\": rpc error: code = NotFound desc = could not find container \"b32e7221752dfb3556a76c60a52146cea04440c074d07e90dcfb7944590af309\": container with ID starting with b32e7221752dfb3556a76c60a52146cea04440c074d07e90dcfb7944590af309 not found: ID does not exist" Dec 10 11:44:52 crc kubenswrapper[4682]: I1210 11:44:52.564693 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="170511a5-ad2a-4906-94a0-a712cb687bb9" path="/var/lib/kubelet/pods/170511a5-ad2a-4906-94a0-a712cb687bb9/volumes" Dec 10 11:44:53 crc kubenswrapper[4682]: E1210 11:44:53.384092 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:44:53 crc kubenswrapper[4682]: E1210 11:44:53.384627 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.192631 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm"] Dec 10 11:45:00 crc kubenswrapper[4682]: E1210 11:45:00.193677 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="170511a5-ad2a-4906-94a0-a712cb687bb9" containerName="registry-server" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.193691 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="170511a5-ad2a-4906-94a0-a712cb687bb9" containerName="registry-server" Dec 10 11:45:00 crc kubenswrapper[4682]: E1210 11:45:00.193717 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="170511a5-ad2a-4906-94a0-a712cb687bb9" containerName="extract-utilities" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.193723 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="170511a5-ad2a-4906-94a0-a712cb687bb9" containerName="extract-utilities" Dec 10 11:45:00 crc kubenswrapper[4682]: E1210 11:45:00.193738 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="170511a5-ad2a-4906-94a0-a712cb687bb9" containerName="extract-content" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.193744 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="170511a5-ad2a-4906-94a0-a712cb687bb9" containerName="extract-content" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.193990 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="170511a5-ad2a-4906-94a0-a712cb687bb9" containerName="registry-server" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.195014 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.197770 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.198447 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.203533 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm"] Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.314819 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a6ca94d8-a405-49bd-8447-be165b736dcd-secret-volume\") pod \"collect-profiles-29422785-f4bsm\" (UID: \"a6ca94d8-a405-49bd-8447-be165b736dcd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.316972 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a6ca94d8-a405-49bd-8447-be165b736dcd-config-volume\") pod \"collect-profiles-29422785-f4bsm\" (UID: \"a6ca94d8-a405-49bd-8447-be165b736dcd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.317140 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n92kc\" (UniqueName: \"kubernetes.io/projected/a6ca94d8-a405-49bd-8447-be165b736dcd-kube-api-access-n92kc\") pod \"collect-profiles-29422785-f4bsm\" (UID: \"a6ca94d8-a405-49bd-8447-be165b736dcd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.419411 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a6ca94d8-a405-49bd-8447-be165b736dcd-config-volume\") pod \"collect-profiles-29422785-f4bsm\" (UID: \"a6ca94d8-a405-49bd-8447-be165b736dcd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.419505 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n92kc\" (UniqueName: \"kubernetes.io/projected/a6ca94d8-a405-49bd-8447-be165b736dcd-kube-api-access-n92kc\") pod \"collect-profiles-29422785-f4bsm\" (UID: \"a6ca94d8-a405-49bd-8447-be165b736dcd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.419616 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a6ca94d8-a405-49bd-8447-be165b736dcd-secret-volume\") pod \"collect-profiles-29422785-f4bsm\" (UID: \"a6ca94d8-a405-49bd-8447-be165b736dcd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.420343 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a6ca94d8-a405-49bd-8447-be165b736dcd-config-volume\") pod 
\"collect-profiles-29422785-f4bsm\" (UID: \"a6ca94d8-a405-49bd-8447-be165b736dcd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.425827 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a6ca94d8-a405-49bd-8447-be165b736dcd-secret-volume\") pod \"collect-profiles-29422785-f4bsm\" (UID: \"a6ca94d8-a405-49bd-8447-be165b736dcd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.435704 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n92kc\" (UniqueName: \"kubernetes.io/projected/a6ca94d8-a405-49bd-8447-be165b736dcd-kube-api-access-n92kc\") pod \"collect-profiles-29422785-f4bsm\" (UID: \"a6ca94d8-a405-49bd-8447-be165b736dcd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.529072 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" Dec 10 11:45:00 crc kubenswrapper[4682]: I1210 11:45:00.977629 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm"] Dec 10 11:45:01 crc kubenswrapper[4682]: I1210 11:45:01.010045 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" event={"ID":"a6ca94d8-a405-49bd-8447-be165b736dcd","Type":"ContainerStarted","Data":"e6cb33897c1a110c0b35f576338a7ededa523a073696f7c53b0402964db1ab14"} Dec 10 11:45:02 crc kubenswrapper[4682]: I1210 11:45:02.022727 4682 generic.go:334] "Generic (PLEG): container finished" podID="a6ca94d8-a405-49bd-8447-be165b736dcd" containerID="c883edfca1c5a0fa8632535a2fa1c3625c0976f74b775c32feb43e8fd6a10da6" exitCode=0 Dec 10 11:45:02 crc kubenswrapper[4682]: I1210 11:45:02.022856 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" event={"ID":"a6ca94d8-a405-49bd-8447-be165b736dcd","Type":"ContainerDied","Data":"c883edfca1c5a0fa8632535a2fa1c3625c0976f74b775c32feb43e8fd6a10da6"} Dec 10 11:45:03 crc kubenswrapper[4682]: I1210 11:45:03.488727 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" Dec 10 11:45:03 crc kubenswrapper[4682]: I1210 11:45:03.584893 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a6ca94d8-a405-49bd-8447-be165b736dcd-secret-volume\") pod \"a6ca94d8-a405-49bd-8447-be165b736dcd\" (UID: \"a6ca94d8-a405-49bd-8447-be165b736dcd\") " Dec 10 11:45:03 crc kubenswrapper[4682]: I1210 11:45:03.585287 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n92kc\" (UniqueName: \"kubernetes.io/projected/a6ca94d8-a405-49bd-8447-be165b736dcd-kube-api-access-n92kc\") pod \"a6ca94d8-a405-49bd-8447-be165b736dcd\" (UID: \"a6ca94d8-a405-49bd-8447-be165b736dcd\") " Dec 10 11:45:03 crc kubenswrapper[4682]: I1210 11:45:03.585497 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a6ca94d8-a405-49bd-8447-be165b736dcd-config-volume\") pod \"a6ca94d8-a405-49bd-8447-be165b736dcd\" (UID: \"a6ca94d8-a405-49bd-8447-be165b736dcd\") " Dec 10 11:45:03 crc kubenswrapper[4682]: I1210 11:45:03.586286 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6ca94d8-a405-49bd-8447-be165b736dcd-config-volume" (OuterVolumeSpecName: "config-volume") pod "a6ca94d8-a405-49bd-8447-be165b736dcd" (UID: "a6ca94d8-a405-49bd-8447-be165b736dcd"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 11:45:03 crc kubenswrapper[4682]: I1210 11:45:03.590284 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6ca94d8-a405-49bd-8447-be165b736dcd-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a6ca94d8-a405-49bd-8447-be165b736dcd" (UID: "a6ca94d8-a405-49bd-8447-be165b736dcd"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:45:03 crc kubenswrapper[4682]: I1210 11:45:03.591789 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6ca94d8-a405-49bd-8447-be165b736dcd-kube-api-access-n92kc" (OuterVolumeSpecName: "kube-api-access-n92kc") pod "a6ca94d8-a405-49bd-8447-be165b736dcd" (UID: "a6ca94d8-a405-49bd-8447-be165b736dcd"). InnerVolumeSpecName "kube-api-access-n92kc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:45:03 crc kubenswrapper[4682]: I1210 11:45:03.694354 4682 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a6ca94d8-a405-49bd-8447-be165b736dcd-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:45:03 crc kubenswrapper[4682]: I1210 11:45:03.694407 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n92kc\" (UniqueName: \"kubernetes.io/projected/a6ca94d8-a405-49bd-8447-be165b736dcd-kube-api-access-n92kc\") on node \"crc\" DevicePath \"\"" Dec 10 11:45:03 crc kubenswrapper[4682]: I1210 11:45:03.694420 4682 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a6ca94d8-a405-49bd-8447-be165b736dcd-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 11:45:04 crc kubenswrapper[4682]: I1210 11:45:04.044345 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" event={"ID":"a6ca94d8-a405-49bd-8447-be165b736dcd","Type":"ContainerDied","Data":"e6cb33897c1a110c0b35f576338a7ededa523a073696f7c53b0402964db1ab14"} Dec 10 11:45:04 crc kubenswrapper[4682]: I1210 11:45:04.044390 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e6cb33897c1a110c0b35f576338a7ededa523a073696f7c53b0402964db1ab14" Dec 10 11:45:04 crc kubenswrapper[4682]: I1210 11:45:04.044422 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422785-f4bsm" Dec 10 11:45:04 crc kubenswrapper[4682]: I1210 11:45:04.567298 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h"] Dec 10 11:45:04 crc kubenswrapper[4682]: I1210 11:45:04.576519 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422740-l6l8h"] Dec 10 11:45:06 crc kubenswrapper[4682]: E1210 11:45:06.384215 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:45:06 crc kubenswrapper[4682]: E1210 11:45:06.385251 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:45:06 crc kubenswrapper[4682]: I1210 11:45:06.405042 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edb14eb0-e25a-46c3-b8da-601fd04ad0a1" path="/var/lib/kubelet/pods/edb14eb0-e25a-46c3-b8da-601fd04ad0a1/volumes" Dec 10 11:45:19 crc kubenswrapper[4682]: E1210 11:45:19.382408 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:45:19 crc 
kubenswrapper[4682]: E1210 11:45:19.382947 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:45:32 crc kubenswrapper[4682]: E1210 11:45:32.385753 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:45:33 crc kubenswrapper[4682]: E1210 11:45:33.383840 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:45:35 crc kubenswrapper[4682]: I1210 11:45:35.434937 4682 scope.go:117] "RemoveContainer" containerID="752892be936f60f0a96523b9422010c7b4158e5eaee9bdb8bbf30d7bed6fb485" Dec 10 11:45:36 crc kubenswrapper[4682]: I1210 11:45:36.480084 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:45:36 crc kubenswrapper[4682]: I1210 11:45:36.480530 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:45:46 crc kubenswrapper[4682]: E1210 11:45:46.384221 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:45:47 crc kubenswrapper[4682]: E1210 11:45:47.383848 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:46:00 crc kubenswrapper[4682]: E1210 11:46:00.392964 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:46:02 crc kubenswrapper[4682]: E1210 11:46:01.709093 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:46:02 crc kubenswrapper[4682]: I1210 11:46:01.971697 4682 patch_prober.go:28] interesting pod/router-default-5444994796-jfqfn container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 11:46:02 crc kubenswrapper[4682]: I1210 11:46:01.972003 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-jfqfn" podUID="51fb452a-e943-4222-a52b-dbdc0f378760" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:46:06 crc kubenswrapper[4682]: I1210 11:46:06.481598 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:46:06 crc kubenswrapper[4682]: I1210 11:46:06.482124 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:46:13 crc kubenswrapper[4682]: E1210 11:46:13.386332 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:46:14 crc kubenswrapper[4682]: E1210 11:46:14.382740 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:46:28 crc kubenswrapper[4682]: E1210 11:46:28.389090 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:46:29 crc kubenswrapper[4682]: E1210 11:46:29.383773 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:46:36 crc kubenswrapper[4682]: I1210 11:46:36.478422 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:46:36 crc kubenswrapper[4682]: I1210 11:46:36.479174 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:46:36 crc kubenswrapper[4682]: I1210 11:46:36.479242 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 11:46:36 crc kubenswrapper[4682]: I1210 11:46:36.480424 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:46:36 crc kubenswrapper[4682]: I1210 11:46:36.480576 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" gracePeriod=600 Dec 10 11:46:36 crc kubenswrapper[4682]: E1210 11:46:36.610906 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:46:37 crc kubenswrapper[4682]: I1210 11:46:37.159348 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" exitCode=0 Dec 10 11:46:37 crc kubenswrapper[4682]: I1210 11:46:37.159403 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a"} Dec 10 11:46:37 crc kubenswrapper[4682]: I1210 11:46:37.159441 4682 scope.go:117] "RemoveContainer" containerID="6e39aa4df01d4732c79541b988dca57f788a157df81d4b776c192d30c6b06276" Dec 10 11:46:37 crc kubenswrapper[4682]: I1210 11:46:37.160210 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:46:37 crc kubenswrapper[4682]: E1210 11:46:37.160725 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" 
Dec 10 11:46:41 crc kubenswrapper[4682]: E1210 11:46:41.385675 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:46:42 crc kubenswrapper[4682]: E1210 11:46:42.383539 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:46:51 crc kubenswrapper[4682]: I1210 11:46:51.381632 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:46:51 crc kubenswrapper[4682]: E1210 11:46:51.382642 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:46:53 crc kubenswrapper[4682]: E1210 11:46:53.384188 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:46:55 crc kubenswrapper[4682]: E1210 11:46:55.382516 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:47:03 crc kubenswrapper[4682]: I1210 11:47:03.381098 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:47:03 crc kubenswrapper[4682]: E1210 11:47:03.382282 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:47:08 crc kubenswrapper[4682]: E1210 11:47:08.387700 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:47:09 crc kubenswrapper[4682]: E1210 11:47:09.383375 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:47:15 crc kubenswrapper[4682]: I1210 11:47:15.381443 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:47:15 crc kubenswrapper[4682]: E1210 11:47:15.382250 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:47:20 crc kubenswrapper[4682]: E1210 11:47:20.392521 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:47:24 crc kubenswrapper[4682]: E1210 11:47:24.383890 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:47:26 crc kubenswrapper[4682]: I1210 11:47:26.381576 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:47:26 crc kubenswrapper[4682]: E1210 11:47:26.382312 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:47:34 crc kubenswrapper[4682]: E1210 11:47:34.385095 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:47:38 crc kubenswrapper[4682]: I1210 11:47:38.384365 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:47:38 crc kubenswrapper[4682]: E1210 11:47:38.499581 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:47:38 crc kubenswrapper[4682]: E1210 11:47:38.499672 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:47:38 crc kubenswrapper[4682]: E1210 11:47:38.499879 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 11:47:38 crc kubenswrapper[4682]: E1210 11:47:38.501261 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.538753 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8wp2f"] Dec 10 11:47:39 crc kubenswrapper[4682]: E1210 11:47:39.540545 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6ca94d8-a405-49bd-8447-be165b736dcd" containerName="collect-profiles" Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.540656 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6ca94d8-a405-49bd-8447-be165b736dcd" containerName="collect-profiles" Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.541863 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6ca94d8-a405-49bd-8447-be165b736dcd" containerName="collect-profiles" Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.551653 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.582079 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8wp2f"] Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.696421 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d11f2db-066d-4146-bcb5-73beb493c477-catalog-content\") pod \"community-operators-8wp2f\" (UID: \"6d11f2db-066d-4146-bcb5-73beb493c477\") " pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.696839 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h25nd\" (UniqueName: \"kubernetes.io/projected/6d11f2db-066d-4146-bcb5-73beb493c477-kube-api-access-h25nd\") pod \"community-operators-8wp2f\" (UID: \"6d11f2db-066d-4146-bcb5-73beb493c477\") " pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.696863 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d11f2db-066d-4146-bcb5-73beb493c477-utilities\") pod \"community-operators-8wp2f\" (UID: \"6d11f2db-066d-4146-bcb5-73beb493c477\") " pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.799295 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d11f2db-066d-4146-bcb5-73beb493c477-catalog-content\") pod \"community-operators-8wp2f\" (UID: \"6d11f2db-066d-4146-bcb5-73beb493c477\") " pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.799394 4682 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-h25nd\" (UniqueName: \"kubernetes.io/projected/6d11f2db-066d-4146-bcb5-73beb493c477-kube-api-access-h25nd\") pod \"community-operators-8wp2f\" (UID: \"6d11f2db-066d-4146-bcb5-73beb493c477\") " pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.799421 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d11f2db-066d-4146-bcb5-73beb493c477-utilities\") pod \"community-operators-8wp2f\" (UID: \"6d11f2db-066d-4146-bcb5-73beb493c477\") " pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.800191 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d11f2db-066d-4146-bcb5-73beb493c477-utilities\") pod \"community-operators-8wp2f\" (UID: \"6d11f2db-066d-4146-bcb5-73beb493c477\") " pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.800194 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d11f2db-066d-4146-bcb5-73beb493c477-catalog-content\") pod \"community-operators-8wp2f\" (UID: \"6d11f2db-066d-4146-bcb5-73beb493c477\") " pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.818818 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h25nd\" (UniqueName: \"kubernetes.io/projected/6d11f2db-066d-4146-bcb5-73beb493c477-kube-api-access-h25nd\") pod \"community-operators-8wp2f\" (UID: \"6d11f2db-066d-4146-bcb5-73beb493c477\") " pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:39 crc kubenswrapper[4682]: I1210 11:47:39.896075 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:40 crc kubenswrapper[4682]: I1210 11:47:40.387013 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:47:40 crc kubenswrapper[4682]: E1210 11:47:40.387603 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:47:40 crc kubenswrapper[4682]: I1210 11:47:40.541537 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8wp2f"] Dec 10 11:47:40 crc kubenswrapper[4682]: I1210 11:47:40.873431 4682 generic.go:334] "Generic (PLEG): container finished" podID="6d11f2db-066d-4146-bcb5-73beb493c477" containerID="4145dfefef7952ed74e00c4272158e96490255e555169d1815d7b47cd114db14" exitCode=0 Dec 10 11:47:40 crc kubenswrapper[4682]: I1210 11:47:40.873785 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8wp2f" event={"ID":"6d11f2db-066d-4146-bcb5-73beb493c477","Type":"ContainerDied","Data":"4145dfefef7952ed74e00c4272158e96490255e555169d1815d7b47cd114db14"} Dec 10 11:47:40 crc kubenswrapper[4682]: I1210 11:47:40.873816 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8wp2f" event={"ID":"6d11f2db-066d-4146-bcb5-73beb493c477","Type":"ContainerStarted","Data":"55412227fc4f8630e0f1d0be253abc07f02652a7830d0a9071d9cc4ada61a8ff"} Dec 10 11:47:41 crc kubenswrapper[4682]: I1210 11:47:41.882695 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8wp2f" event={"ID":"6d11f2db-066d-4146-bcb5-73beb493c477","Type":"ContainerStarted","Data":"6f4955723bb643e2584e951f9023c5c0e55a7b9ff8b23b58ad7f507ad4a10aa0"} Dec 10 11:47:42 crc kubenswrapper[4682]: I1210 11:47:42.894040 4682 generic.go:334] "Generic (PLEG): container finished" podID="6d11f2db-066d-4146-bcb5-73beb493c477" containerID="6f4955723bb643e2584e951f9023c5c0e55a7b9ff8b23b58ad7f507ad4a10aa0" exitCode=0 Dec 10 11:47:42 crc kubenswrapper[4682]: I1210 11:47:42.894236 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8wp2f" event={"ID":"6d11f2db-066d-4146-bcb5-73beb493c477","Type":"ContainerDied","Data":"6f4955723bb643e2584e951f9023c5c0e55a7b9ff8b23b58ad7f507ad4a10aa0"} Dec 10 11:47:44 crc kubenswrapper[4682]: I1210 11:47:44.927862 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8wp2f" event={"ID":"6d11f2db-066d-4146-bcb5-73beb493c477","Type":"ContainerStarted","Data":"f8998a6c148b6bf3004430de6c2c5619df3c8db2f7635a940cc63f964351992f"} Dec 10 11:47:44 crc kubenswrapper[4682]: I1210 11:47:44.949106 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8wp2f" podStartSLOduration=2.842043903 podStartE2EDuration="5.949089829s" podCreationTimestamp="2025-12-10 11:47:39 +0000 UTC" firstStartedPulling="2025-12-10 11:47:40.876027408 +0000 UTC m=+3741.196238168" lastFinishedPulling="2025-12-10 11:47:43.983073334 +0000 UTC m=+3744.303284094" observedRunningTime="2025-12-10 
11:47:44.945411504 +0000 UTC m=+3745.265622254" watchObservedRunningTime="2025-12-10 11:47:44.949089829 +0000 UTC m=+3745.269300579" Dec 10 11:47:46 crc kubenswrapper[4682]: E1210 11:47:46.464022 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:47:46 crc kubenswrapper[4682]: E1210 11:47:46.464273 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:47:46 crc kubenswrapper[4682]: E1210 11:47:46.464379 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:47:46 crc kubenswrapper[4682]: E1210 11:47:46.465607 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:47:49 crc kubenswrapper[4682]: I1210 11:47:49.896190 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:49 crc kubenswrapper[4682]: I1210 11:47:49.896802 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:49 crc kubenswrapper[4682]: I1210 11:47:49.998896 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:50 crc kubenswrapper[4682]: I1210 11:47:50.049421 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:52 crc kubenswrapper[4682]: I1210 11:47:52.381064 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:47:52 crc kubenswrapper[4682]: E1210 11:47:52.381673 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:47:53 crc kubenswrapper[4682]: E1210 11:47:53.383162 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:47:53 crc kubenswrapper[4682]: I1210 11:47:53.526516 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8wp2f"] Dec 10 11:47:53 crc kubenswrapper[4682]: I1210 11:47:53.526754 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8wp2f" podUID="6d11f2db-066d-4146-bcb5-73beb493c477" containerName="registry-server" containerID="cri-o://f8998a6c148b6bf3004430de6c2c5619df3c8db2f7635a940cc63f964351992f" gracePeriod=2 Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.021890 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.069071 4682 generic.go:334] "Generic (PLEG): container finished" podID="6d11f2db-066d-4146-bcb5-73beb493c477" containerID="f8998a6c148b6bf3004430de6c2c5619df3c8db2f7635a940cc63f964351992f" exitCode=0 Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.069115 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8wp2f" event={"ID":"6d11f2db-066d-4146-bcb5-73beb493c477","Type":"ContainerDied","Data":"f8998a6c148b6bf3004430de6c2c5619df3c8db2f7635a940cc63f964351992f"} Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.069141 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8wp2f" event={"ID":"6d11f2db-066d-4146-bcb5-73beb493c477","Type":"ContainerDied","Data":"55412227fc4f8630e0f1d0be253abc07f02652a7830d0a9071d9cc4ada61a8ff"} Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.069148 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8wp2f" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.069161 4682 scope.go:117] "RemoveContainer" containerID="f8998a6c148b6bf3004430de6c2c5619df3c8db2f7635a940cc63f964351992f" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.093299 4682 scope.go:117] "RemoveContainer" containerID="6f4955723bb643e2584e951f9023c5c0e55a7b9ff8b23b58ad7f507ad4a10aa0" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.114022 4682 scope.go:117] "RemoveContainer" containerID="4145dfefef7952ed74e00c4272158e96490255e555169d1815d7b47cd114db14" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.174900 4682 scope.go:117] "RemoveContainer" containerID="f8998a6c148b6bf3004430de6c2c5619df3c8db2f7635a940cc63f964351992f" Dec 10 11:47:54 crc kubenswrapper[4682]: E1210 11:47:54.175596 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8998a6c148b6bf3004430de6c2c5619df3c8db2f7635a940cc63f964351992f\": container with ID starting with f8998a6c148b6bf3004430de6c2c5619df3c8db2f7635a940cc63f964351992f not found: ID does not exist" containerID="f8998a6c148b6bf3004430de6c2c5619df3c8db2f7635a940cc63f964351992f" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.175630 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8998a6c148b6bf3004430de6c2c5619df3c8db2f7635a940cc63f964351992f"} err="failed to get container status \"f8998a6c148b6bf3004430de6c2c5619df3c8db2f7635a940cc63f964351992f\": rpc error: code = NotFound desc = could not find container \"f8998a6c148b6bf3004430de6c2c5619df3c8db2f7635a940cc63f964351992f\": container with ID starting with f8998a6c148b6bf3004430de6c2c5619df3c8db2f7635a940cc63f964351992f not found: ID does not exist" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.175651 4682 scope.go:117] "RemoveContainer" containerID="6f4955723bb643e2584e951f9023c5c0e55a7b9ff8b23b58ad7f507ad4a10aa0" Dec 10 11:47:54 crc kubenswrapper[4682]: E1210 11:47:54.176086 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f4955723bb643e2584e951f9023c5c0e55a7b9ff8b23b58ad7f507ad4a10aa0\": container with ID starting with 6f4955723bb643e2584e951f9023c5c0e55a7b9ff8b23b58ad7f507ad4a10aa0 not found: ID does not exist" containerID="6f4955723bb643e2584e951f9023c5c0e55a7b9ff8b23b58ad7f507ad4a10aa0" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.176114 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f4955723bb643e2584e951f9023c5c0e55a7b9ff8b23b58ad7f507ad4a10aa0"} err="failed to get container status \"6f4955723bb643e2584e951f9023c5c0e55a7b9ff8b23b58ad7f507ad4a10aa0\": rpc error: code = NotFound desc = could not find container \"6f4955723bb643e2584e951f9023c5c0e55a7b9ff8b23b58ad7f507ad4a10aa0\": container with ID starting with 6f4955723bb643e2584e951f9023c5c0e55a7b9ff8b23b58ad7f507ad4a10aa0 not found: ID does not exist" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.176134 4682 scope.go:117] "RemoveContainer" containerID="4145dfefef7952ed74e00c4272158e96490255e555169d1815d7b47cd114db14" Dec 10 11:47:54 crc kubenswrapper[4682]: E1210 11:47:54.176380 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4145dfefef7952ed74e00c4272158e96490255e555169d1815d7b47cd114db14\": container with ID starting 
with 4145dfefef7952ed74e00c4272158e96490255e555169d1815d7b47cd114db14 not found: ID does not exist" containerID="4145dfefef7952ed74e00c4272158e96490255e555169d1815d7b47cd114db14" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.176403 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4145dfefef7952ed74e00c4272158e96490255e555169d1815d7b47cd114db14"} err="failed to get container status \"4145dfefef7952ed74e00c4272158e96490255e555169d1815d7b47cd114db14\": rpc error: code = NotFound desc = could not find container \"4145dfefef7952ed74e00c4272158e96490255e555169d1815d7b47cd114db14\": container with ID starting with 4145dfefef7952ed74e00c4272158e96490255e555169d1815d7b47cd114db14 not found: ID does not exist" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.180037 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d11f2db-066d-4146-bcb5-73beb493c477-catalog-content\") pod \"6d11f2db-066d-4146-bcb5-73beb493c477\" (UID: \"6d11f2db-066d-4146-bcb5-73beb493c477\") " Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.180199 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h25nd\" (UniqueName: \"kubernetes.io/projected/6d11f2db-066d-4146-bcb5-73beb493c477-kube-api-access-h25nd\") pod \"6d11f2db-066d-4146-bcb5-73beb493c477\" (UID: \"6d11f2db-066d-4146-bcb5-73beb493c477\") " Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.180413 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d11f2db-066d-4146-bcb5-73beb493c477-utilities\") pod \"6d11f2db-066d-4146-bcb5-73beb493c477\" (UID: \"6d11f2db-066d-4146-bcb5-73beb493c477\") " Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.181283 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d11f2db-066d-4146-bcb5-73beb493c477-utilities" (OuterVolumeSpecName: "utilities") pod "6d11f2db-066d-4146-bcb5-73beb493c477" (UID: "6d11f2db-066d-4146-bcb5-73beb493c477"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.186184 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d11f2db-066d-4146-bcb5-73beb493c477-kube-api-access-h25nd" (OuterVolumeSpecName: "kube-api-access-h25nd") pod "6d11f2db-066d-4146-bcb5-73beb493c477" (UID: "6d11f2db-066d-4146-bcb5-73beb493c477"). InnerVolumeSpecName "kube-api-access-h25nd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.247408 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d11f2db-066d-4146-bcb5-73beb493c477-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6d11f2db-066d-4146-bcb5-73beb493c477" (UID: "6d11f2db-066d-4146-bcb5-73beb493c477"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.282736 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d11f2db-066d-4146-bcb5-73beb493c477-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.282774 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d11f2db-066d-4146-bcb5-73beb493c477-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.282788 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h25nd\" (UniqueName: \"kubernetes.io/projected/6d11f2db-066d-4146-bcb5-73beb493c477-kube-api-access-h25nd\") on node \"crc\" DevicePath \"\"" Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.463424 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8wp2f"] Dec 10 11:47:54 crc kubenswrapper[4682]: I1210 11:47:54.471648 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8wp2f"] Dec 10 11:47:56 crc kubenswrapper[4682]: I1210 11:47:56.395771 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d11f2db-066d-4146-bcb5-73beb493c477" path="/var/lib/kubelet/pods/6d11f2db-066d-4146-bcb5-73beb493c477/volumes" Dec 10 11:48:01 crc kubenswrapper[4682]: E1210 11:48:01.387116 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:48:07 crc kubenswrapper[4682]: I1210 11:48:07.381652 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:48:07 crc kubenswrapper[4682]: E1210 11:48:07.382494 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:48:07 crc kubenswrapper[4682]: E1210 11:48:07.384161 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:48:16 crc kubenswrapper[4682]: E1210 11:48:16.383986 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:48:19 crc kubenswrapper[4682]: E1210 11:48:19.386059 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:48:20 crc kubenswrapper[4682]: I1210 11:48:20.380553 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:48:20 crc kubenswrapper[4682]: E1210 11:48:20.380810 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:48:31 crc kubenswrapper[4682]: I1210 11:48:31.381171 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:48:31 crc kubenswrapper[4682]: E1210 11:48:31.382230 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:48:31 crc kubenswrapper[4682]: E1210 11:48:31.383348 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:48:34 crc kubenswrapper[4682]: E1210 11:48:34.383075 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:48:45 crc kubenswrapper[4682]: I1210 11:48:45.390939 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:48:45 crc kubenswrapper[4682]: E1210 11:48:45.392355 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:48:46 crc kubenswrapper[4682]: E1210 11:48:46.384750 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:48:46 crc kubenswrapper[4682]: E1210 11:48:46.384773 4682 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:48:57 crc kubenswrapper[4682]: E1210 11:48:57.386188 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:48:58 crc kubenswrapper[4682]: I1210 11:48:58.381485 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:48:58 crc kubenswrapper[4682]: E1210 11:48:58.382076 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:48:58 crc kubenswrapper[4682]: E1210 11:48:58.382832 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:49:02 crc kubenswrapper[4682]: I1210 11:49:02.126600 4682 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-dbszw container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 11:49:02 crc kubenswrapper[4682]: I1210 11:49:02.127015 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" podUID="cfbd68ba-8aec-439c-9549-9347c5e80d21" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:49:02 crc kubenswrapper[4682]: I1210 11:49:02.127415 4682 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-dbszw container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 11:49:02 crc kubenswrapper[4682]: I1210 11:49:02.127442 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-dbszw" podUID="cfbd68ba-8aec-439c-9549-9347c5e80d21" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:49:02 crc kubenswrapper[4682]: I1210 11:49:02.147149 4682 
patch_prober.go:28] interesting pod/marketplace-operator-79b997595-pcvj2 container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.65:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 11:49:02 crc kubenswrapper[4682]: I1210 11:49:02.147234 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" podUID="27962a48-9d75-4437-bc45-9258a223ebbb" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.65:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:49:02 crc kubenswrapper[4682]: I1210 11:49:02.147310 4682 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-pcvj2 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.65:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 11:49:02 crc kubenswrapper[4682]: I1210 11:49:02.147327 4682 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-pcvj2" podUID="27962a48-9d75-4437-bc45-9258a223ebbb" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.65:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 11:49:09 crc kubenswrapper[4682]: E1210 11:49:09.385185 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:49:10 crc kubenswrapper[4682]: E1210 11:49:10.390949 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:49:13 crc kubenswrapper[4682]: I1210 11:49:13.381452 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:49:13 crc kubenswrapper[4682]: E1210 11:49:13.382409 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:49:23 crc kubenswrapper[4682]: E1210 11:49:23.384482 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:49:23 crc kubenswrapper[4682]: E1210 11:49:23.385018 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:49:26 crc kubenswrapper[4682]: I1210 11:49:26.382377 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:49:26 crc kubenswrapper[4682]: E1210 11:49:26.382946 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:49:36 crc kubenswrapper[4682]: E1210 11:49:36.385242 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:49:36 crc kubenswrapper[4682]: E1210 11:49:36.385824 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:49:38 crc kubenswrapper[4682]: I1210 11:49:38.381799 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:49:38 crc kubenswrapper[4682]: E1210 11:49:38.382389 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:49:47 crc kubenswrapper[4682]: E1210 11:49:47.384941 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:49:49 crc kubenswrapper[4682]: E1210 11:49:49.385295 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:49:53 crc kubenswrapper[4682]: I1210 11:49:53.381347 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:49:53 crc kubenswrapper[4682]: E1210 11:49:53.382246 4682 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:50:01 crc kubenswrapper[4682]: E1210 11:50:01.384584 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:50:04 crc kubenswrapper[4682]: E1210 11:50:04.390181 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:50:08 crc kubenswrapper[4682]: I1210 11:50:08.380967 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:50:08 crc kubenswrapper[4682]: E1210 11:50:08.382183 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:50:14 crc kubenswrapper[4682]: E1210 11:50:14.391973 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:50:17 crc kubenswrapper[4682]: E1210 11:50:17.382706 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:50:23 crc kubenswrapper[4682]: I1210 11:50:23.381146 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:50:23 crc kubenswrapper[4682]: E1210 11:50:23.381910 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:50:29 crc kubenswrapper[4682]: E1210 11:50:29.383809 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" 
with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:50:30 crc kubenswrapper[4682]: E1210 11:50:30.390275 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:50:34 crc kubenswrapper[4682]: I1210 11:50:34.381077 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:50:34 crc kubenswrapper[4682]: E1210 11:50:34.381861 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:50:41 crc kubenswrapper[4682]: E1210 11:50:41.424499 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:50:44 crc kubenswrapper[4682]: E1210 11:50:44.385664 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:50:45 crc kubenswrapper[4682]: I1210 11:50:45.417833 4682 generic.go:334] "Generic (PLEG): container finished" podID="29311a90-82aa-4b3f-a171-f7d45d0b9dc1" containerID="e9e505a6cf45222e9c2a503552661348c08de11f3f2c813025c03c3e801da4fb" exitCode=2 Dec 10 11:50:45 crc kubenswrapper[4682]: I1210 11:50:45.417878 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" event={"ID":"29311a90-82aa-4b3f-a171-f7d45d0b9dc1","Type":"ContainerDied","Data":"e9e505a6cf45222e9c2a503552661348c08de11f3f2c813025c03c3e801da4fb"} Dec 10 11:50:47 crc kubenswrapper[4682]: I1210 11:50:47.436942 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" event={"ID":"29311a90-82aa-4b3f-a171-f7d45d0b9dc1","Type":"ContainerDied","Data":"9144f19b1b5020a4a28ac6048d129d2b170c4fc6dbfacd4248ca753d6691e1a6"} Dec 10 11:50:47 crc kubenswrapper[4682]: I1210 11:50:47.437402 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9144f19b1b5020a4a28ac6048d129d2b170c4fc6dbfacd4248ca753d6691e1a6" Dec 10 11:50:47 crc kubenswrapper[4682]: I1210 11:50:47.456491 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" Dec 10 11:50:47 crc kubenswrapper[4682]: I1210 11:50:47.510909 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-ssh-key\") pod \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\" (UID: \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\") " Dec 10 11:50:47 crc kubenswrapper[4682]: I1210 11:50:47.511227 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-inventory\") pod \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\" (UID: \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\") " Dec 10 11:50:47 crc kubenswrapper[4682]: I1210 11:50:47.511428 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wcpms\" (UniqueName: \"kubernetes.io/projected/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-kube-api-access-wcpms\") pod \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\" (UID: \"29311a90-82aa-4b3f-a171-f7d45d0b9dc1\") " Dec 10 11:50:47 crc kubenswrapper[4682]: I1210 11:50:47.517758 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-kube-api-access-wcpms" (OuterVolumeSpecName: "kube-api-access-wcpms") pod "29311a90-82aa-4b3f-a171-f7d45d0b9dc1" (UID: "29311a90-82aa-4b3f-a171-f7d45d0b9dc1"). InnerVolumeSpecName "kube-api-access-wcpms". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:50:47 crc kubenswrapper[4682]: I1210 11:50:47.547720 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "29311a90-82aa-4b3f-a171-f7d45d0b9dc1" (UID: "29311a90-82aa-4b3f-a171-f7d45d0b9dc1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:50:47 crc kubenswrapper[4682]: I1210 11:50:47.569035 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-inventory" (OuterVolumeSpecName: "inventory") pod "29311a90-82aa-4b3f-a171-f7d45d0b9dc1" (UID: "29311a90-82aa-4b3f-a171-f7d45d0b9dc1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:50:47 crc kubenswrapper[4682]: I1210 11:50:47.613872 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wcpms\" (UniqueName: \"kubernetes.io/projected/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-kube-api-access-wcpms\") on node \"crc\" DevicePath \"\"" Dec 10 11:50:47 crc kubenswrapper[4682]: I1210 11:50:47.613903 4682 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:50:47 crc kubenswrapper[4682]: I1210 11:50:47.613912 4682 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29311a90-82aa-4b3f-a171-f7d45d0b9dc1-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:50:48 crc kubenswrapper[4682]: I1210 11:50:48.444623 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb" Dec 10 11:50:49 crc kubenswrapper[4682]: I1210 11:50:49.382865 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:50:49 crc kubenswrapper[4682]: E1210 11:50:49.383334 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:50:52 crc kubenswrapper[4682]: E1210 11:50:52.383989 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:50:59 crc kubenswrapper[4682]: E1210 11:50:59.383192 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:51:00 crc kubenswrapper[4682]: I1210 11:51:00.388102 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:51:00 crc kubenswrapper[4682]: E1210 11:51:00.389368 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:51:04 crc kubenswrapper[4682]: E1210 11:51:04.386939 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:51:14 crc kubenswrapper[4682]: E1210 11:51:14.384768 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:51:15 crc kubenswrapper[4682]: I1210 11:51:15.382160 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:51:15 crc kubenswrapper[4682]: E1210 11:51:15.383216 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:51:17 crc kubenswrapper[4682]: E1210 11:51:17.383719 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:51:26 crc kubenswrapper[4682]: E1210 11:51:26.384565 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:51:27 crc kubenswrapper[4682]: I1210 11:51:27.382022 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:51:27 crc kubenswrapper[4682]: E1210 11:51:27.382661 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:51:28 crc kubenswrapper[4682]: E1210 11:51:28.384529 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:51:37 crc kubenswrapper[4682]: E1210 11:51:37.383015 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:51:39 crc kubenswrapper[4682]: E1210 11:51:39.383390 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:51:40 crc kubenswrapper[4682]: I1210 11:51:40.399091 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:51:41 crc kubenswrapper[4682]: I1210 11:51:41.566950 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"60c3fbe6230de68ee431d21fa11b812406e0934693ca871cb3512f4172af1fe3"} Dec 10 11:51:51 crc kubenswrapper[4682]: E1210 11:51:51.385603 4682 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:51:52 crc kubenswrapper[4682]: E1210 11:51:52.384277 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:52:05 crc kubenswrapper[4682]: E1210 11:52:05.411405 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:52:07 crc kubenswrapper[4682]: E1210 11:52:07.384461 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:52:19 crc kubenswrapper[4682]: E1210 11:52:19.385402 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:52:22 crc kubenswrapper[4682]: E1210 11:52:22.383456 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:52:31 crc kubenswrapper[4682]: E1210 11:52:31.385608 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:52:34 crc kubenswrapper[4682]: E1210 11:52:34.383936 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:52:43 crc kubenswrapper[4682]: I1210 11:52:43.786588 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="d9f85710-54c3-4f30-88f6-bb97f9a200e8" containerName="galera" probeResult="failure" output="command timed out" Dec 10 11:52:43 crc kubenswrapper[4682]: I1210 11:52:43.787139 4682 prober.go:107] 
"Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="d9f85710-54c3-4f30-88f6-bb97f9a200e8" containerName="galera" probeResult="failure" output="command timed out" Dec 10 11:52:45 crc kubenswrapper[4682]: E1210 11:52:45.384923 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:52:48 crc kubenswrapper[4682]: I1210 11:52:48.383146 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 11:52:48 crc kubenswrapper[4682]: E1210 11:52:48.488575 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:52:48 crc kubenswrapper[4682]: E1210 11:52:48.488652 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:52:48 crc kubenswrapper[4682]: E1210 11:52:48.488884 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:52:48 crc kubenswrapper[4682]: E1210 11:52:48.490759 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:52:58 crc kubenswrapper[4682]: E1210 11:52:58.487377 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:52:58 crc kubenswrapper[4682]: E1210 11:52:58.489386 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:52:58 crc kubenswrapper[4682]: E1210 11:52:58.489885 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:52:58 crc kubenswrapper[4682]: E1210 11:52:58.491802 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:53:02 crc kubenswrapper[4682]: E1210 11:53:02.383402 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:53:09 crc kubenswrapper[4682]: E1210 11:53:09.382916 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:53:14 crc kubenswrapper[4682]: E1210 11:53:14.384333 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:53:20 crc kubenswrapper[4682]: E1210 11:53:20.413931 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.251964 4682 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq"] Dec 10 11:53:25 crc kubenswrapper[4682]: E1210 11:53:25.254418 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d11f2db-066d-4146-bcb5-73beb493c477" containerName="registry-server" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.254543 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d11f2db-066d-4146-bcb5-73beb493c477" containerName="registry-server" Dec 10 11:53:25 crc kubenswrapper[4682]: E1210 11:53:25.254630 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d11f2db-066d-4146-bcb5-73beb493c477" containerName="extract-utilities" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.254711 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d11f2db-066d-4146-bcb5-73beb493c477" containerName="extract-utilities" Dec 10 11:53:25 crc kubenswrapper[4682]: E1210 11:53:25.254783 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29311a90-82aa-4b3f-a171-f7d45d0b9dc1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.254840 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="29311a90-82aa-4b3f-a171-f7d45d0b9dc1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:53:25 crc kubenswrapper[4682]: E1210 11:53:25.254949 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d11f2db-066d-4146-bcb5-73beb493c477" containerName="extract-content" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.254976 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d11f2db-066d-4146-bcb5-73beb493c477" containerName="extract-content" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.257499 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d11f2db-066d-4146-bcb5-73beb493c477" containerName="registry-server" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.257563 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="29311a90-82aa-4b3f-a171-f7d45d0b9dc1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.259560 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.268231 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq"] Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.274266 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.274577 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.275220 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-tln2g" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.275365 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.426735 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d02d5aa-758d-49b4-aa9e-77062c9af129-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-smtgq\" (UID: \"8d02d5aa-758d-49b4-aa9e-77062c9af129\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.427259 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hg5n\" (UniqueName: \"kubernetes.io/projected/8d02d5aa-758d-49b4-aa9e-77062c9af129-kube-api-access-9hg5n\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-smtgq\" (UID: \"8d02d5aa-758d-49b4-aa9e-77062c9af129\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.427524 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d02d5aa-758d-49b4-aa9e-77062c9af129-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-smtgq\" (UID: \"8d02d5aa-758d-49b4-aa9e-77062c9af129\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.529919 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d02d5aa-758d-49b4-aa9e-77062c9af129-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-smtgq\" (UID: \"8d02d5aa-758d-49b4-aa9e-77062c9af129\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.530355 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d02d5aa-758d-49b4-aa9e-77062c9af129-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-smtgq\" (UID: \"8d02d5aa-758d-49b4-aa9e-77062c9af129\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.530793 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hg5n\" (UniqueName: \"kubernetes.io/projected/8d02d5aa-758d-49b4-aa9e-77062c9af129-kube-api-access-9hg5n\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-smtgq\" (UID: \"8d02d5aa-758d-49b4-aa9e-77062c9af129\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.535386 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d02d5aa-758d-49b4-aa9e-77062c9af129-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-smtgq\" (UID: \"8d02d5aa-758d-49b4-aa9e-77062c9af129\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.543169 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d02d5aa-758d-49b4-aa9e-77062c9af129-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-smtgq\" (UID: \"8d02d5aa-758d-49b4-aa9e-77062c9af129\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.548278 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hg5n\" (UniqueName: \"kubernetes.io/projected/8d02d5aa-758d-49b4-aa9e-77062c9af129-kube-api-access-9hg5n\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-smtgq\" (UID: \"8d02d5aa-758d-49b4-aa9e-77062c9af129\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" Dec 10 11:53:25 crc kubenswrapper[4682]: I1210 11:53:25.588772 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" Dec 10 11:53:26 crc kubenswrapper[4682]: W1210 11:53:26.152670 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d02d5aa_758d_49b4_aa9e_77062c9af129.slice/crio-0c2bf19e16cd77b9c33743201b717217c98ad5ec919d0e9e659cf4095385e98b WatchSource:0}: Error finding container 0c2bf19e16cd77b9c33743201b717217c98ad5ec919d0e9e659cf4095385e98b: Status 404 returned error can't find the container with id 0c2bf19e16cd77b9c33743201b717217c98ad5ec919d0e9e659cf4095385e98b Dec 10 11:53:26 crc kubenswrapper[4682]: I1210 11:53:26.153742 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq"] Dec 10 11:53:26 crc kubenswrapper[4682]: E1210 11:53:26.382758 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:53:26 crc kubenswrapper[4682]: I1210 11:53:26.391376 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" event={"ID":"8d02d5aa-758d-49b4-aa9e-77062c9af129","Type":"ContainerStarted","Data":"0c2bf19e16cd77b9c33743201b717217c98ad5ec919d0e9e659cf4095385e98b"} Dec 10 11:53:28 crc kubenswrapper[4682]: I1210 11:53:28.426456 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" event={"ID":"8d02d5aa-758d-49b4-aa9e-77062c9af129","Type":"ContainerStarted","Data":"02d90643b99baac2d4922d2e4c9943a48219e3610b3fadaec1a34383a4e13946"} Dec 10 11:53:28 crc 
kubenswrapper[4682]: I1210 11:53:28.463844 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" podStartSLOduration=2.8194751289999997 podStartE2EDuration="3.463816798s" podCreationTimestamp="2025-12-10 11:53:25 +0000 UTC" firstStartedPulling="2025-12-10 11:53:26.154839396 +0000 UTC m=+4086.475050146" lastFinishedPulling="2025-12-10 11:53:26.799181065 +0000 UTC m=+4087.119391815" observedRunningTime="2025-12-10 11:53:28.455982663 +0000 UTC m=+4088.776193403" watchObservedRunningTime="2025-12-10 11:53:28.463816798 +0000 UTC m=+4088.784027588" Dec 10 11:53:33 crc kubenswrapper[4682]: E1210 11:53:33.384179 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:53:38 crc kubenswrapper[4682]: E1210 11:53:38.384382 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:53:44 crc kubenswrapper[4682]: E1210 11:53:44.387584 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:53:53 crc kubenswrapper[4682]: E1210 11:53:53.383525 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:53:55 crc kubenswrapper[4682]: E1210 11:53:55.383019 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:54:06 crc kubenswrapper[4682]: I1210 11:54:06.478931 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:54:06 crc kubenswrapper[4682]: I1210 11:54:06.479463 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:54:08 crc kubenswrapper[4682]: E1210 11:54:08.384389 4682 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:54:08 crc kubenswrapper[4682]: E1210 11:54:08.384845 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:54:19 crc kubenswrapper[4682]: E1210 11:54:19.384434 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:54:22 crc kubenswrapper[4682]: E1210 11:54:22.382948 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:54:31 crc kubenswrapper[4682]: E1210 11:54:31.384609 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:54:33 crc kubenswrapper[4682]: E1210 11:54:33.382416 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:54:36 crc kubenswrapper[4682]: I1210 11:54:36.479130 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:54:36 crc kubenswrapper[4682]: I1210 11:54:36.480011 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:54:45 crc kubenswrapper[4682]: E1210 11:54:45.384871 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:54:45 crc kubenswrapper[4682]: I1210 
11:54:45.639875 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-q5wdq"] Dec 10 11:54:45 crc kubenswrapper[4682]: I1210 11:54:45.642511 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:45 crc kubenswrapper[4682]: I1210 11:54:45.657736 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q5wdq"] Dec 10 11:54:45 crc kubenswrapper[4682]: I1210 11:54:45.771505 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d48b7c81-841f-4911-9fa2-19868f25d28e-catalog-content\") pod \"certified-operators-q5wdq\" (UID: \"d48b7c81-841f-4911-9fa2-19868f25d28e\") " pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:45 crc kubenswrapper[4682]: I1210 11:54:45.771899 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48b7c81-841f-4911-9fa2-19868f25d28e-utilities\") pod \"certified-operators-q5wdq\" (UID: \"d48b7c81-841f-4911-9fa2-19868f25d28e\") " pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:45 crc kubenswrapper[4682]: I1210 11:54:45.772119 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zk76q\" (UniqueName: \"kubernetes.io/projected/d48b7c81-841f-4911-9fa2-19868f25d28e-kube-api-access-zk76q\") pod \"certified-operators-q5wdq\" (UID: \"d48b7c81-841f-4911-9fa2-19868f25d28e\") " pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:45 crc kubenswrapper[4682]: I1210 11:54:45.874166 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zk76q\" (UniqueName: \"kubernetes.io/projected/d48b7c81-841f-4911-9fa2-19868f25d28e-kube-api-access-zk76q\") pod \"certified-operators-q5wdq\" (UID: \"d48b7c81-841f-4911-9fa2-19868f25d28e\") " pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:45 crc kubenswrapper[4682]: I1210 11:54:45.874355 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d48b7c81-841f-4911-9fa2-19868f25d28e-catalog-content\") pod \"certified-operators-q5wdq\" (UID: \"d48b7c81-841f-4911-9fa2-19868f25d28e\") " pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:45 crc kubenswrapper[4682]: I1210 11:54:45.874457 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48b7c81-841f-4911-9fa2-19868f25d28e-utilities\") pod \"certified-operators-q5wdq\" (UID: \"d48b7c81-841f-4911-9fa2-19868f25d28e\") " pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:45 crc kubenswrapper[4682]: I1210 11:54:45.875088 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48b7c81-841f-4911-9fa2-19868f25d28e-utilities\") pod \"certified-operators-q5wdq\" (UID: \"d48b7c81-841f-4911-9fa2-19868f25d28e\") " pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:45 crc kubenswrapper[4682]: I1210 11:54:45.875094 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/d48b7c81-841f-4911-9fa2-19868f25d28e-catalog-content\") pod \"certified-operators-q5wdq\" (UID: \"d48b7c81-841f-4911-9fa2-19868f25d28e\") " pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:45 crc kubenswrapper[4682]: I1210 11:54:45.896232 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zk76q\" (UniqueName: \"kubernetes.io/projected/d48b7c81-841f-4911-9fa2-19868f25d28e-kube-api-access-zk76q\") pod \"certified-operators-q5wdq\" (UID: \"d48b7c81-841f-4911-9fa2-19868f25d28e\") " pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:45 crc kubenswrapper[4682]: I1210 11:54:45.971328 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:46 crc kubenswrapper[4682]: I1210 11:54:46.513816 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q5wdq"] Dec 10 11:54:47 crc kubenswrapper[4682]: I1210 11:54:47.327653 4682 generic.go:334] "Generic (PLEG): container finished" podID="d48b7c81-841f-4911-9fa2-19868f25d28e" containerID="15fa984ade8afde8acf4b27db935b493eedeadcbc61979684ed5237e9a63437f" exitCode=0 Dec 10 11:54:47 crc kubenswrapper[4682]: I1210 11:54:47.327900 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5wdq" event={"ID":"d48b7c81-841f-4911-9fa2-19868f25d28e","Type":"ContainerDied","Data":"15fa984ade8afde8acf4b27db935b493eedeadcbc61979684ed5237e9a63437f"} Dec 10 11:54:47 crc kubenswrapper[4682]: I1210 11:54:47.328301 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5wdq" event={"ID":"d48b7c81-841f-4911-9fa2-19868f25d28e","Type":"ContainerStarted","Data":"1b5f560587957a9ef05cac90adf67db047435b419e21a3396da2fc9128e36084"} Dec 10 11:54:47 crc kubenswrapper[4682]: E1210 11:54:47.382551 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:54:48 crc kubenswrapper[4682]: I1210 11:54:48.339838 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5wdq" event={"ID":"d48b7c81-841f-4911-9fa2-19868f25d28e","Type":"ContainerStarted","Data":"4f84e7a6aeae9ae2684431b8f7b3d9af54df9466be82cb69ea1b0379ed0a3ad4"} Dec 10 11:54:49 crc kubenswrapper[4682]: I1210 11:54:49.355011 4682 generic.go:334] "Generic (PLEG): container finished" podID="d48b7c81-841f-4911-9fa2-19868f25d28e" containerID="4f84e7a6aeae9ae2684431b8f7b3d9af54df9466be82cb69ea1b0379ed0a3ad4" exitCode=0 Dec 10 11:54:49 crc kubenswrapper[4682]: I1210 11:54:49.355084 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5wdq" event={"ID":"d48b7c81-841f-4911-9fa2-19868f25d28e","Type":"ContainerDied","Data":"4f84e7a6aeae9ae2684431b8f7b3d9af54df9466be82cb69ea1b0379ed0a3ad4"} Dec 10 11:54:50 crc kubenswrapper[4682]: I1210 11:54:50.368202 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5wdq" event={"ID":"d48b7c81-841f-4911-9fa2-19868f25d28e","Type":"ContainerStarted","Data":"21041cf6b480d3d397ad0844fd8e5102931eae6b059fca05f6ad037b8f9ae1ba"} Dec 10 11:54:50 crc 
kubenswrapper[4682]: I1210 11:54:50.399595 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-q5wdq" podStartSLOduration=2.871933984 podStartE2EDuration="5.39957385s" podCreationTimestamp="2025-12-10 11:54:45 +0000 UTC" firstStartedPulling="2025-12-10 11:54:47.332418727 +0000 UTC m=+4167.652629487" lastFinishedPulling="2025-12-10 11:54:49.860058603 +0000 UTC m=+4170.180269353" observedRunningTime="2025-12-10 11:54:50.389870848 +0000 UTC m=+4170.710081608" watchObservedRunningTime="2025-12-10 11:54:50.39957385 +0000 UTC m=+4170.719784610" Dec 10 11:54:55 crc kubenswrapper[4682]: I1210 11:54:55.971620 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:55 crc kubenswrapper[4682]: I1210 11:54:55.972088 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:56 crc kubenswrapper[4682]: I1210 11:54:56.020636 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:56 crc kubenswrapper[4682]: I1210 11:54:56.942699 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:54:56 crc kubenswrapper[4682]: I1210 11:54:56.988314 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q5wdq"] Dec 10 11:54:58 crc kubenswrapper[4682]: E1210 11:54:58.383649 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:54:58 crc kubenswrapper[4682]: I1210 11:54:58.446274 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-q5wdq" podUID="d48b7c81-841f-4911-9fa2-19868f25d28e" containerName="registry-server" containerID="cri-o://21041cf6b480d3d397ad0844fd8e5102931eae6b059fca05f6ad037b8f9ae1ba" gracePeriod=2 Dec 10 11:54:59 crc kubenswrapper[4682]: E1210 11:54:59.383532 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:54:59 crc kubenswrapper[4682]: I1210 11:54:59.459699 4682 generic.go:334] "Generic (PLEG): container finished" podID="d48b7c81-841f-4911-9fa2-19868f25d28e" containerID="21041cf6b480d3d397ad0844fd8e5102931eae6b059fca05f6ad037b8f9ae1ba" exitCode=0 Dec 10 11:54:59 crc kubenswrapper[4682]: I1210 11:54:59.459835 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5wdq" event={"ID":"d48b7c81-841f-4911-9fa2-19868f25d28e","Type":"ContainerDied","Data":"21041cf6b480d3d397ad0844fd8e5102931eae6b059fca05f6ad037b8f9ae1ba"} Dec 10 11:55:01 crc kubenswrapper[4682]: I1210 11:55:01.091973 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:55:01 crc kubenswrapper[4682]: I1210 11:55:01.123940 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d48b7c81-841f-4911-9fa2-19868f25d28e-catalog-content\") pod \"d48b7c81-841f-4911-9fa2-19868f25d28e\" (UID: \"d48b7c81-841f-4911-9fa2-19868f25d28e\") " Dec 10 11:55:01 crc kubenswrapper[4682]: I1210 11:55:01.123994 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48b7c81-841f-4911-9fa2-19868f25d28e-utilities\") pod \"d48b7c81-841f-4911-9fa2-19868f25d28e\" (UID: \"d48b7c81-841f-4911-9fa2-19868f25d28e\") " Dec 10 11:55:01 crc kubenswrapper[4682]: I1210 11:55:01.124066 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zk76q\" (UniqueName: \"kubernetes.io/projected/d48b7c81-841f-4911-9fa2-19868f25d28e-kube-api-access-zk76q\") pod \"d48b7c81-841f-4911-9fa2-19868f25d28e\" (UID: \"d48b7c81-841f-4911-9fa2-19868f25d28e\") " Dec 10 11:55:01 crc kubenswrapper[4682]: I1210 11:55:01.125075 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d48b7c81-841f-4911-9fa2-19868f25d28e-utilities" (OuterVolumeSpecName: "utilities") pod "d48b7c81-841f-4911-9fa2-19868f25d28e" (UID: "d48b7c81-841f-4911-9fa2-19868f25d28e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:55:01 crc kubenswrapper[4682]: I1210 11:55:01.147382 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d48b7c81-841f-4911-9fa2-19868f25d28e-kube-api-access-zk76q" (OuterVolumeSpecName: "kube-api-access-zk76q") pod "d48b7c81-841f-4911-9fa2-19868f25d28e" (UID: "d48b7c81-841f-4911-9fa2-19868f25d28e"). InnerVolumeSpecName "kube-api-access-zk76q". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:55:01 crc kubenswrapper[4682]: I1210 11:55:01.181590 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d48b7c81-841f-4911-9fa2-19868f25d28e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d48b7c81-841f-4911-9fa2-19868f25d28e" (UID: "d48b7c81-841f-4911-9fa2-19868f25d28e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:55:01 crc kubenswrapper[4682]: I1210 11:55:01.226722 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d48b7c81-841f-4911-9fa2-19868f25d28e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:55:01 crc kubenswrapper[4682]: I1210 11:55:01.226751 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d48b7c81-841f-4911-9fa2-19868f25d28e-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:55:01 crc kubenswrapper[4682]: I1210 11:55:01.226762 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zk76q\" (UniqueName: \"kubernetes.io/projected/d48b7c81-841f-4911-9fa2-19868f25d28e-kube-api-access-zk76q\") on node \"crc\" DevicePath \"\"" Dec 10 11:55:01 crc kubenswrapper[4682]: I1210 11:55:01.969032 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5wdq" event={"ID":"d48b7c81-841f-4911-9fa2-19868f25d28e","Type":"ContainerDied","Data":"1b5f560587957a9ef05cac90adf67db047435b419e21a3396da2fc9128e36084"} Dec 10 11:55:01 crc kubenswrapper[4682]: I1210 11:55:01.969068 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q5wdq" Dec 10 11:55:01 crc kubenswrapper[4682]: I1210 11:55:01.969349 4682 scope.go:117] "RemoveContainer" containerID="21041cf6b480d3d397ad0844fd8e5102931eae6b059fca05f6ad037b8f9ae1ba" Dec 10 11:55:02 crc kubenswrapper[4682]: I1210 11:55:02.002413 4682 scope.go:117] "RemoveContainer" containerID="4f84e7a6aeae9ae2684431b8f7b3d9af54df9466be82cb69ea1b0379ed0a3ad4" Dec 10 11:55:02 crc kubenswrapper[4682]: I1210 11:55:02.018598 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q5wdq"] Dec 10 11:55:02 crc kubenswrapper[4682]: I1210 11:55:02.023336 4682 scope.go:117] "RemoveContainer" containerID="15fa984ade8afde8acf4b27db935b493eedeadcbc61979684ed5237e9a63437f" Dec 10 11:55:02 crc kubenswrapper[4682]: I1210 11:55:02.032190 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-q5wdq"] Dec 10 11:55:02 crc kubenswrapper[4682]: I1210 11:55:02.399223 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d48b7c81-841f-4911-9fa2-19868f25d28e" path="/var/lib/kubelet/pods/d48b7c81-841f-4911-9fa2-19868f25d28e/volumes" Dec 10 11:55:06 crc kubenswrapper[4682]: I1210 11:55:06.478932 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:55:06 crc kubenswrapper[4682]: I1210 11:55:06.479440 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:55:06 crc kubenswrapper[4682]: I1210 11:55:06.479517 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 11:55:06 crc kubenswrapper[4682]: I1210 11:55:06.480329 4682 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"60c3fbe6230de68ee431d21fa11b812406e0934693ca871cb3512f4172af1fe3"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:55:06 crc kubenswrapper[4682]: I1210 11:55:06.480387 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://60c3fbe6230de68ee431d21fa11b812406e0934693ca871cb3512f4172af1fe3" gracePeriod=600 Dec 10 11:55:07 crc kubenswrapper[4682]: I1210 11:55:07.034295 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="60c3fbe6230de68ee431d21fa11b812406e0934693ca871cb3512f4172af1fe3" exitCode=0 Dec 10 11:55:07 crc kubenswrapper[4682]: I1210 11:55:07.034373 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"60c3fbe6230de68ee431d21fa11b812406e0934693ca871cb3512f4172af1fe3"} Dec 10 11:55:07 crc kubenswrapper[4682]: I1210 11:55:07.034589 4682 scope.go:117] "RemoveContainer" containerID="08d4b4e7ada5f9522a5dc0ce8500c7f105dabf2bd0e31b414d295b9ca79bcb6a" Dec 10 11:55:08 crc kubenswrapper[4682]: I1210 11:55:08.046924 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49"} Dec 10 11:55:12 crc kubenswrapper[4682]: E1210 11:55:12.382604 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:55:13 crc kubenswrapper[4682]: E1210 11:55:13.383138 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:55:27 crc kubenswrapper[4682]: E1210 11:55:27.384029 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:55:28 crc kubenswrapper[4682]: E1210 11:55:28.385556 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:55:40 crc kubenswrapper[4682]: E1210 
11:55:40.404700 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:55:42 crc kubenswrapper[4682]: E1210 11:55:42.384413 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.609451 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dj4h7"] Dec 10 11:55:44 crc kubenswrapper[4682]: E1210 11:55:44.610225 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d48b7c81-841f-4911-9fa2-19868f25d28e" containerName="extract-utilities" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.610237 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="d48b7c81-841f-4911-9fa2-19868f25d28e" containerName="extract-utilities" Dec 10 11:55:44 crc kubenswrapper[4682]: E1210 11:55:44.610260 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d48b7c81-841f-4911-9fa2-19868f25d28e" containerName="extract-content" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.610266 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="d48b7c81-841f-4911-9fa2-19868f25d28e" containerName="extract-content" Dec 10 11:55:44 crc kubenswrapper[4682]: E1210 11:55:44.610285 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d48b7c81-841f-4911-9fa2-19868f25d28e" containerName="registry-server" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.610291 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="d48b7c81-841f-4911-9fa2-19868f25d28e" containerName="registry-server" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.610502 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="d48b7c81-841f-4911-9fa2-19868f25d28e" containerName="registry-server" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.613182 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.634545 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dj4h7"] Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.788539 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c03fa782-3933-4182-9a3a-5b86059ed1f0-catalog-content\") pod \"redhat-operators-dj4h7\" (UID: \"c03fa782-3933-4182-9a3a-5b86059ed1f0\") " pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.788619 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qg4zm\" (UniqueName: \"kubernetes.io/projected/c03fa782-3933-4182-9a3a-5b86059ed1f0-kube-api-access-qg4zm\") pod \"redhat-operators-dj4h7\" (UID: \"c03fa782-3933-4182-9a3a-5b86059ed1f0\") " pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.788747 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c03fa782-3933-4182-9a3a-5b86059ed1f0-utilities\") pod \"redhat-operators-dj4h7\" (UID: \"c03fa782-3933-4182-9a3a-5b86059ed1f0\") " pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.891268 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c03fa782-3933-4182-9a3a-5b86059ed1f0-catalog-content\") pod \"redhat-operators-dj4h7\" (UID: \"c03fa782-3933-4182-9a3a-5b86059ed1f0\") " pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.891685 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qg4zm\" (UniqueName: \"kubernetes.io/projected/c03fa782-3933-4182-9a3a-5b86059ed1f0-kube-api-access-qg4zm\") pod \"redhat-operators-dj4h7\" (UID: \"c03fa782-3933-4182-9a3a-5b86059ed1f0\") " pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.891881 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c03fa782-3933-4182-9a3a-5b86059ed1f0-utilities\") pod \"redhat-operators-dj4h7\" (UID: \"c03fa782-3933-4182-9a3a-5b86059ed1f0\") " pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.891971 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c03fa782-3933-4182-9a3a-5b86059ed1f0-catalog-content\") pod \"redhat-operators-dj4h7\" (UID: \"c03fa782-3933-4182-9a3a-5b86059ed1f0\") " pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.892356 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c03fa782-3933-4182-9a3a-5b86059ed1f0-utilities\") pod \"redhat-operators-dj4h7\" (UID: \"c03fa782-3933-4182-9a3a-5b86059ed1f0\") " pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.912414 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-qg4zm\" (UniqueName: \"kubernetes.io/projected/c03fa782-3933-4182-9a3a-5b86059ed1f0-kube-api-access-qg4zm\") pod \"redhat-operators-dj4h7\" (UID: \"c03fa782-3933-4182-9a3a-5b86059ed1f0\") " pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:55:44 crc kubenswrapper[4682]: I1210 11:55:44.937757 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:55:45 crc kubenswrapper[4682]: I1210 11:55:45.422828 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dj4h7"] Dec 10 11:55:45 crc kubenswrapper[4682]: I1210 11:55:45.464129 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dj4h7" event={"ID":"c03fa782-3933-4182-9a3a-5b86059ed1f0","Type":"ContainerStarted","Data":"a8c830e6d3c83fa3c78147132f2fdacd4918a7cc4cdeadefe3f30d544bd7b124"} Dec 10 11:55:46 crc kubenswrapper[4682]: I1210 11:55:46.480419 4682 generic.go:334] "Generic (PLEG): container finished" podID="c03fa782-3933-4182-9a3a-5b86059ed1f0" containerID="a708ee55a38ee6155a3e5b796b9ddc164470f6d59fdbf6553b0a9786ca4cbf1d" exitCode=0 Dec 10 11:55:46 crc kubenswrapper[4682]: I1210 11:55:46.480657 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dj4h7" event={"ID":"c03fa782-3933-4182-9a3a-5b86059ed1f0","Type":"ContainerDied","Data":"a708ee55a38ee6155a3e5b796b9ddc164470f6d59fdbf6553b0a9786ca4cbf1d"} Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.005646 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qnp7k"] Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.009723 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.017769 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qnp7k"] Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.075331 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70acbcab-17e3-490c-8fef-dc1308d51820-catalog-content\") pod \"redhat-marketplace-qnp7k\" (UID: \"70acbcab-17e3-490c-8fef-dc1308d51820\") " pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.075405 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxsq4\" (UniqueName: \"kubernetes.io/projected/70acbcab-17e3-490c-8fef-dc1308d51820-kube-api-access-qxsq4\") pod \"redhat-marketplace-qnp7k\" (UID: \"70acbcab-17e3-490c-8fef-dc1308d51820\") " pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.075504 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70acbcab-17e3-490c-8fef-dc1308d51820-utilities\") pod \"redhat-marketplace-qnp7k\" (UID: \"70acbcab-17e3-490c-8fef-dc1308d51820\") " pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.177679 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxsq4\" (UniqueName: \"kubernetes.io/projected/70acbcab-17e3-490c-8fef-dc1308d51820-kube-api-access-qxsq4\") pod \"redhat-marketplace-qnp7k\" (UID: \"70acbcab-17e3-490c-8fef-dc1308d51820\") " pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.177804 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70acbcab-17e3-490c-8fef-dc1308d51820-utilities\") pod \"redhat-marketplace-qnp7k\" (UID: \"70acbcab-17e3-490c-8fef-dc1308d51820\") " pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.177884 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70acbcab-17e3-490c-8fef-dc1308d51820-catalog-content\") pod \"redhat-marketplace-qnp7k\" (UID: \"70acbcab-17e3-490c-8fef-dc1308d51820\") " pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.178326 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70acbcab-17e3-490c-8fef-dc1308d51820-catalog-content\") pod \"redhat-marketplace-qnp7k\" (UID: \"70acbcab-17e3-490c-8fef-dc1308d51820\") " pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.178803 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70acbcab-17e3-490c-8fef-dc1308d51820-utilities\") pod \"redhat-marketplace-qnp7k\" (UID: \"70acbcab-17e3-490c-8fef-dc1308d51820\") " pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.198786 4682 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-qxsq4\" (UniqueName: \"kubernetes.io/projected/70acbcab-17e3-490c-8fef-dc1308d51820-kube-api-access-qxsq4\") pod \"redhat-marketplace-qnp7k\" (UID: \"70acbcab-17e3-490c-8fef-dc1308d51820\") " pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.340723 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.504858 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dj4h7" event={"ID":"c03fa782-3933-4182-9a3a-5b86059ed1f0","Type":"ContainerStarted","Data":"df11f204517c03844745660c1d6c16746734fab9389222be803c1b6608758c52"} Dec 10 11:55:47 crc kubenswrapper[4682]: I1210 11:55:47.872619 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qnp7k"] Dec 10 11:55:48 crc kubenswrapper[4682]: I1210 11:55:48.516799 4682 generic.go:334] "Generic (PLEG): container finished" podID="70acbcab-17e3-490c-8fef-dc1308d51820" containerID="b036be798e88f695a58d246c42cf7113584cf86ed3ad472a2ff9cda247ff73cb" exitCode=0 Dec 10 11:55:48 crc kubenswrapper[4682]: I1210 11:55:48.516900 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnp7k" event={"ID":"70acbcab-17e3-490c-8fef-dc1308d51820","Type":"ContainerDied","Data":"b036be798e88f695a58d246c42cf7113584cf86ed3ad472a2ff9cda247ff73cb"} Dec 10 11:55:48 crc kubenswrapper[4682]: I1210 11:55:48.517234 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnp7k" event={"ID":"70acbcab-17e3-490c-8fef-dc1308d51820","Type":"ContainerStarted","Data":"5525b32403eb78fffbf8858d0dd685f7aeb8e201d47dcacd4fb366b8ecaf0389"} Dec 10 11:55:49 crc kubenswrapper[4682]: I1210 11:55:49.531035 4682 generic.go:334] "Generic (PLEG): container finished" podID="c03fa782-3933-4182-9a3a-5b86059ed1f0" containerID="df11f204517c03844745660c1d6c16746734fab9389222be803c1b6608758c52" exitCode=0 Dec 10 11:55:49 crc kubenswrapper[4682]: I1210 11:55:49.531077 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dj4h7" event={"ID":"c03fa782-3933-4182-9a3a-5b86059ed1f0","Type":"ContainerDied","Data":"df11f204517c03844745660c1d6c16746734fab9389222be803c1b6608758c52"} Dec 10 11:55:50 crc kubenswrapper[4682]: I1210 11:55:50.542435 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnp7k" event={"ID":"70acbcab-17e3-490c-8fef-dc1308d51820","Type":"ContainerStarted","Data":"68f038a52309f0e09de4155097ec5efcca306c29d263fdf068514b5f5267b3ca"} Dec 10 11:55:51 crc kubenswrapper[4682]: I1210 11:55:51.559244 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dj4h7" event={"ID":"c03fa782-3933-4182-9a3a-5b86059ed1f0","Type":"ContainerStarted","Data":"3697d80144e107e014c17e71c9e4ea0f15a6374c175cd9679bf840cd8251e527"} Dec 10 11:55:51 crc kubenswrapper[4682]: I1210 11:55:51.564301 4682 generic.go:334] "Generic (PLEG): container finished" podID="70acbcab-17e3-490c-8fef-dc1308d51820" containerID="68f038a52309f0e09de4155097ec5efcca306c29d263fdf068514b5f5267b3ca" exitCode=0 Dec 10 11:55:51 crc kubenswrapper[4682]: I1210 11:55:51.564358 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnp7k" 
event={"ID":"70acbcab-17e3-490c-8fef-dc1308d51820","Type":"ContainerDied","Data":"68f038a52309f0e09de4155097ec5efcca306c29d263fdf068514b5f5267b3ca"} Dec 10 11:55:51 crc kubenswrapper[4682]: I1210 11:55:51.603594 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dj4h7" podStartSLOduration=3.3295259010000002 podStartE2EDuration="7.603570582s" podCreationTimestamp="2025-12-10 11:55:44 +0000 UTC" firstStartedPulling="2025-12-10 11:55:46.48357376 +0000 UTC m=+4226.803784510" lastFinishedPulling="2025-12-10 11:55:50.757618441 +0000 UTC m=+4231.077829191" observedRunningTime="2025-12-10 11:55:51.587769099 +0000 UTC m=+4231.907979889" watchObservedRunningTime="2025-12-10 11:55:51.603570582 +0000 UTC m=+4231.923781342" Dec 10 11:55:52 crc kubenswrapper[4682]: I1210 11:55:52.573815 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnp7k" event={"ID":"70acbcab-17e3-490c-8fef-dc1308d51820","Type":"ContainerStarted","Data":"11b8c6d5047bc8b229657f9d11a11b571fd11c5d2ce1c409b39eb1d06f67f81f"} Dec 10 11:55:52 crc kubenswrapper[4682]: I1210 11:55:52.598097 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qnp7k" podStartSLOduration=2.8618528039999998 podStartE2EDuration="6.59807372s" podCreationTimestamp="2025-12-10 11:55:46 +0000 UTC" firstStartedPulling="2025-12-10 11:55:48.519214421 +0000 UTC m=+4228.839425171" lastFinishedPulling="2025-12-10 11:55:52.255435327 +0000 UTC m=+4232.575646087" observedRunningTime="2025-12-10 11:55:52.589903175 +0000 UTC m=+4232.910113955" watchObservedRunningTime="2025-12-10 11:55:52.59807372 +0000 UTC m=+4232.918284470" Dec 10 11:55:54 crc kubenswrapper[4682]: I1210 11:55:54.938957 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:55:54 crc kubenswrapper[4682]: I1210 11:55:54.939198 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:55:55 crc kubenswrapper[4682]: E1210 11:55:55.385395 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:55:55 crc kubenswrapper[4682]: E1210 11:55:55.388045 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:55:55 crc kubenswrapper[4682]: I1210 11:55:55.992009 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dj4h7" podUID="c03fa782-3933-4182-9a3a-5b86059ed1f0" containerName="registry-server" probeResult="failure" output=< Dec 10 11:55:55 crc kubenswrapper[4682]: timeout: failed to connect service ":50051" within 1s Dec 10 11:55:55 crc kubenswrapper[4682]: > Dec 10 11:55:57 crc kubenswrapper[4682]: I1210 11:55:57.342151 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 
10 11:55:57 crc kubenswrapper[4682]: I1210 11:55:57.342215 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:55:57 crc kubenswrapper[4682]: I1210 11:55:57.413882 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:55:57 crc kubenswrapper[4682]: I1210 11:55:57.663273 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:55:59 crc kubenswrapper[4682]: I1210 11:55:59.395828 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qnp7k"] Dec 10 11:55:59 crc kubenswrapper[4682]: I1210 11:55:59.641976 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qnp7k" podUID="70acbcab-17e3-490c-8fef-dc1308d51820" containerName="registry-server" containerID="cri-o://11b8c6d5047bc8b229657f9d11a11b571fd11c5d2ce1c409b39eb1d06f67f81f" gracePeriod=2 Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.183721 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.271608 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70acbcab-17e3-490c-8fef-dc1308d51820-utilities\") pod \"70acbcab-17e3-490c-8fef-dc1308d51820\" (UID: \"70acbcab-17e3-490c-8fef-dc1308d51820\") " Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.271860 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxsq4\" (UniqueName: \"kubernetes.io/projected/70acbcab-17e3-490c-8fef-dc1308d51820-kube-api-access-qxsq4\") pod \"70acbcab-17e3-490c-8fef-dc1308d51820\" (UID: \"70acbcab-17e3-490c-8fef-dc1308d51820\") " Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.271899 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70acbcab-17e3-490c-8fef-dc1308d51820-catalog-content\") pod \"70acbcab-17e3-490c-8fef-dc1308d51820\" (UID: \"70acbcab-17e3-490c-8fef-dc1308d51820\") " Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.272884 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70acbcab-17e3-490c-8fef-dc1308d51820-utilities" (OuterVolumeSpecName: "utilities") pod "70acbcab-17e3-490c-8fef-dc1308d51820" (UID: "70acbcab-17e3-490c-8fef-dc1308d51820"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.278051 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70acbcab-17e3-490c-8fef-dc1308d51820-kube-api-access-qxsq4" (OuterVolumeSpecName: "kube-api-access-qxsq4") pod "70acbcab-17e3-490c-8fef-dc1308d51820" (UID: "70acbcab-17e3-490c-8fef-dc1308d51820"). InnerVolumeSpecName "kube-api-access-qxsq4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.292098 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70acbcab-17e3-490c-8fef-dc1308d51820-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "70acbcab-17e3-490c-8fef-dc1308d51820" (UID: "70acbcab-17e3-490c-8fef-dc1308d51820"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.374296 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxsq4\" (UniqueName: \"kubernetes.io/projected/70acbcab-17e3-490c-8fef-dc1308d51820-kube-api-access-qxsq4\") on node \"crc\" DevicePath \"\"" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.374325 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70acbcab-17e3-490c-8fef-dc1308d51820-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.374334 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70acbcab-17e3-490c-8fef-dc1308d51820-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.654141 4682 generic.go:334] "Generic (PLEG): container finished" podID="70acbcab-17e3-490c-8fef-dc1308d51820" containerID="11b8c6d5047bc8b229657f9d11a11b571fd11c5d2ce1c409b39eb1d06f67f81f" exitCode=0 Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.654189 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnp7k" event={"ID":"70acbcab-17e3-490c-8fef-dc1308d51820","Type":"ContainerDied","Data":"11b8c6d5047bc8b229657f9d11a11b571fd11c5d2ce1c409b39eb1d06f67f81f"} Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.654219 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnp7k" event={"ID":"70acbcab-17e3-490c-8fef-dc1308d51820","Type":"ContainerDied","Data":"5525b32403eb78fffbf8858d0dd685f7aeb8e201d47dcacd4fb366b8ecaf0389"} Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.654230 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qnp7k" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.654238 4682 scope.go:117] "RemoveContainer" containerID="11b8c6d5047bc8b229657f9d11a11b571fd11c5d2ce1c409b39eb1d06f67f81f" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.679370 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qnp7k"] Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.682053 4682 scope.go:117] "RemoveContainer" containerID="68f038a52309f0e09de4155097ec5efcca306c29d263fdf068514b5f5267b3ca" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.688155 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qnp7k"] Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.794072 4682 scope.go:117] "RemoveContainer" containerID="b036be798e88f695a58d246c42cf7113584cf86ed3ad472a2ff9cda247ff73cb" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.822980 4682 scope.go:117] "RemoveContainer" containerID="11b8c6d5047bc8b229657f9d11a11b571fd11c5d2ce1c409b39eb1d06f67f81f" Dec 10 11:56:00 crc kubenswrapper[4682]: E1210 11:56:00.823560 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11b8c6d5047bc8b229657f9d11a11b571fd11c5d2ce1c409b39eb1d06f67f81f\": container with ID starting with 11b8c6d5047bc8b229657f9d11a11b571fd11c5d2ce1c409b39eb1d06f67f81f not found: ID does not exist" containerID="11b8c6d5047bc8b229657f9d11a11b571fd11c5d2ce1c409b39eb1d06f67f81f" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.823620 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11b8c6d5047bc8b229657f9d11a11b571fd11c5d2ce1c409b39eb1d06f67f81f"} err="failed to get container status \"11b8c6d5047bc8b229657f9d11a11b571fd11c5d2ce1c409b39eb1d06f67f81f\": rpc error: code = NotFound desc = could not find container \"11b8c6d5047bc8b229657f9d11a11b571fd11c5d2ce1c409b39eb1d06f67f81f\": container with ID starting with 11b8c6d5047bc8b229657f9d11a11b571fd11c5d2ce1c409b39eb1d06f67f81f not found: ID does not exist" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.823650 4682 scope.go:117] "RemoveContainer" containerID="68f038a52309f0e09de4155097ec5efcca306c29d263fdf068514b5f5267b3ca" Dec 10 11:56:00 crc kubenswrapper[4682]: E1210 11:56:00.824029 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68f038a52309f0e09de4155097ec5efcca306c29d263fdf068514b5f5267b3ca\": container with ID starting with 68f038a52309f0e09de4155097ec5efcca306c29d263fdf068514b5f5267b3ca not found: ID does not exist" containerID="68f038a52309f0e09de4155097ec5efcca306c29d263fdf068514b5f5267b3ca" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.824078 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68f038a52309f0e09de4155097ec5efcca306c29d263fdf068514b5f5267b3ca"} err="failed to get container status \"68f038a52309f0e09de4155097ec5efcca306c29d263fdf068514b5f5267b3ca\": rpc error: code = NotFound desc = could not find container \"68f038a52309f0e09de4155097ec5efcca306c29d263fdf068514b5f5267b3ca\": container with ID starting with 68f038a52309f0e09de4155097ec5efcca306c29d263fdf068514b5f5267b3ca not found: ID does not exist" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.824113 4682 scope.go:117] "RemoveContainer" 
containerID="b036be798e88f695a58d246c42cf7113584cf86ed3ad472a2ff9cda247ff73cb" Dec 10 11:56:00 crc kubenswrapper[4682]: E1210 11:56:00.824604 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b036be798e88f695a58d246c42cf7113584cf86ed3ad472a2ff9cda247ff73cb\": container with ID starting with b036be798e88f695a58d246c42cf7113584cf86ed3ad472a2ff9cda247ff73cb not found: ID does not exist" containerID="b036be798e88f695a58d246c42cf7113584cf86ed3ad472a2ff9cda247ff73cb" Dec 10 11:56:00 crc kubenswrapper[4682]: I1210 11:56:00.824646 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b036be798e88f695a58d246c42cf7113584cf86ed3ad472a2ff9cda247ff73cb"} err="failed to get container status \"b036be798e88f695a58d246c42cf7113584cf86ed3ad472a2ff9cda247ff73cb\": rpc error: code = NotFound desc = could not find container \"b036be798e88f695a58d246c42cf7113584cf86ed3ad472a2ff9cda247ff73cb\": container with ID starting with b036be798e88f695a58d246c42cf7113584cf86ed3ad472a2ff9cda247ff73cb not found: ID does not exist" Dec 10 11:56:02 crc kubenswrapper[4682]: I1210 11:56:02.394216 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70acbcab-17e3-490c-8fef-dc1308d51820" path="/var/lib/kubelet/pods/70acbcab-17e3-490c-8fef-dc1308d51820/volumes" Dec 10 11:56:04 crc kubenswrapper[4682]: I1210 11:56:04.995046 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:56:05 crc kubenswrapper[4682]: I1210 11:56:05.049634 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:56:05 crc kubenswrapper[4682]: I1210 11:56:05.233955 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dj4h7"] Dec 10 11:56:06 crc kubenswrapper[4682]: I1210 11:56:06.721619 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dj4h7" podUID="c03fa782-3933-4182-9a3a-5b86059ed1f0" containerName="registry-server" containerID="cri-o://3697d80144e107e014c17e71c9e4ea0f15a6374c175cd9679bf840cd8251e527" gracePeriod=2 Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.236326 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.322965 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c03fa782-3933-4182-9a3a-5b86059ed1f0-catalog-content\") pod \"c03fa782-3933-4182-9a3a-5b86059ed1f0\" (UID: \"c03fa782-3933-4182-9a3a-5b86059ed1f0\") " Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.323163 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg4zm\" (UniqueName: \"kubernetes.io/projected/c03fa782-3933-4182-9a3a-5b86059ed1f0-kube-api-access-qg4zm\") pod \"c03fa782-3933-4182-9a3a-5b86059ed1f0\" (UID: \"c03fa782-3933-4182-9a3a-5b86059ed1f0\") " Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.323349 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c03fa782-3933-4182-9a3a-5b86059ed1f0-utilities\") pod \"c03fa782-3933-4182-9a3a-5b86059ed1f0\" (UID: \"c03fa782-3933-4182-9a3a-5b86059ed1f0\") " Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.324494 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c03fa782-3933-4182-9a3a-5b86059ed1f0-utilities" (OuterVolumeSpecName: "utilities") pod "c03fa782-3933-4182-9a3a-5b86059ed1f0" (UID: "c03fa782-3933-4182-9a3a-5b86059ed1f0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.329740 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03fa782-3933-4182-9a3a-5b86059ed1f0-kube-api-access-qg4zm" (OuterVolumeSpecName: "kube-api-access-qg4zm") pod "c03fa782-3933-4182-9a3a-5b86059ed1f0" (UID: "c03fa782-3933-4182-9a3a-5b86059ed1f0"). InnerVolumeSpecName "kube-api-access-qg4zm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:56:07 crc kubenswrapper[4682]: E1210 11:56:07.383812 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.428588 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg4zm\" (UniqueName: \"kubernetes.io/projected/c03fa782-3933-4182-9a3a-5b86059ed1f0-kube-api-access-qg4zm\") on node \"crc\" DevicePath \"\"" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.428640 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c03fa782-3933-4182-9a3a-5b86059ed1f0-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.474312 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c03fa782-3933-4182-9a3a-5b86059ed1f0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c03fa782-3933-4182-9a3a-5b86059ed1f0" (UID: "c03fa782-3933-4182-9a3a-5b86059ed1f0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.533562 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c03fa782-3933-4182-9a3a-5b86059ed1f0-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.733819 4682 generic.go:334] "Generic (PLEG): container finished" podID="c03fa782-3933-4182-9a3a-5b86059ed1f0" containerID="3697d80144e107e014c17e71c9e4ea0f15a6374c175cd9679bf840cd8251e527" exitCode=0 Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.733881 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dj4h7" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.733933 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dj4h7" event={"ID":"c03fa782-3933-4182-9a3a-5b86059ed1f0","Type":"ContainerDied","Data":"3697d80144e107e014c17e71c9e4ea0f15a6374c175cd9679bf840cd8251e527"} Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.734757 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dj4h7" event={"ID":"c03fa782-3933-4182-9a3a-5b86059ed1f0","Type":"ContainerDied","Data":"a8c830e6d3c83fa3c78147132f2fdacd4918a7cc4cdeadefe3f30d544bd7b124"} Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.734807 4682 scope.go:117] "RemoveContainer" containerID="3697d80144e107e014c17e71c9e4ea0f15a6374c175cd9679bf840cd8251e527" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.767620 4682 scope.go:117] "RemoveContainer" containerID="df11f204517c03844745660c1d6c16746734fab9389222be803c1b6608758c52" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.774816 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dj4h7"] Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.794049 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dj4h7"] Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.816540 4682 scope.go:117] "RemoveContainer" containerID="a708ee55a38ee6155a3e5b796b9ddc164470f6d59fdbf6553b0a9786ca4cbf1d" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.867934 4682 scope.go:117] "RemoveContainer" containerID="3697d80144e107e014c17e71c9e4ea0f15a6374c175cd9679bf840cd8251e527" Dec 10 11:56:07 crc kubenswrapper[4682]: E1210 11:56:07.868365 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3697d80144e107e014c17e71c9e4ea0f15a6374c175cd9679bf840cd8251e527\": container with ID starting with 3697d80144e107e014c17e71c9e4ea0f15a6374c175cd9679bf840cd8251e527 not found: ID does not exist" containerID="3697d80144e107e014c17e71c9e4ea0f15a6374c175cd9679bf840cd8251e527" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.868408 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3697d80144e107e014c17e71c9e4ea0f15a6374c175cd9679bf840cd8251e527"} err="failed to get container status \"3697d80144e107e014c17e71c9e4ea0f15a6374c175cd9679bf840cd8251e527\": rpc error: code = NotFound desc = could not find container \"3697d80144e107e014c17e71c9e4ea0f15a6374c175cd9679bf840cd8251e527\": container with ID starting with 3697d80144e107e014c17e71c9e4ea0f15a6374c175cd9679bf840cd8251e527 not found: ID does not exist" Dec 10 11:56:07 crc 
kubenswrapper[4682]: I1210 11:56:07.868433 4682 scope.go:117] "RemoveContainer" containerID="df11f204517c03844745660c1d6c16746734fab9389222be803c1b6608758c52" Dec 10 11:56:07 crc kubenswrapper[4682]: E1210 11:56:07.869006 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df11f204517c03844745660c1d6c16746734fab9389222be803c1b6608758c52\": container with ID starting with df11f204517c03844745660c1d6c16746734fab9389222be803c1b6608758c52 not found: ID does not exist" containerID="df11f204517c03844745660c1d6c16746734fab9389222be803c1b6608758c52" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.869035 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df11f204517c03844745660c1d6c16746734fab9389222be803c1b6608758c52"} err="failed to get container status \"df11f204517c03844745660c1d6c16746734fab9389222be803c1b6608758c52\": rpc error: code = NotFound desc = could not find container \"df11f204517c03844745660c1d6c16746734fab9389222be803c1b6608758c52\": container with ID starting with df11f204517c03844745660c1d6c16746734fab9389222be803c1b6608758c52 not found: ID does not exist" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.869054 4682 scope.go:117] "RemoveContainer" containerID="a708ee55a38ee6155a3e5b796b9ddc164470f6d59fdbf6553b0a9786ca4cbf1d" Dec 10 11:56:07 crc kubenswrapper[4682]: E1210 11:56:07.869408 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a708ee55a38ee6155a3e5b796b9ddc164470f6d59fdbf6553b0a9786ca4cbf1d\": container with ID starting with a708ee55a38ee6155a3e5b796b9ddc164470f6d59fdbf6553b0a9786ca4cbf1d not found: ID does not exist" containerID="a708ee55a38ee6155a3e5b796b9ddc164470f6d59fdbf6553b0a9786ca4cbf1d" Dec 10 11:56:07 crc kubenswrapper[4682]: I1210 11:56:07.869446 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a708ee55a38ee6155a3e5b796b9ddc164470f6d59fdbf6553b0a9786ca4cbf1d"} err="failed to get container status \"a708ee55a38ee6155a3e5b796b9ddc164470f6d59fdbf6553b0a9786ca4cbf1d\": rpc error: code = NotFound desc = could not find container \"a708ee55a38ee6155a3e5b796b9ddc164470f6d59fdbf6553b0a9786ca4cbf1d\": container with ID starting with a708ee55a38ee6155a3e5b796b9ddc164470f6d59fdbf6553b0a9786ca4cbf1d not found: ID does not exist" Dec 10 11:56:08 crc kubenswrapper[4682]: I1210 11:56:08.418340 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03fa782-3933-4182-9a3a-5b86059ed1f0" path="/var/lib/kubelet/pods/c03fa782-3933-4182-9a3a-5b86059ed1f0/volumes" Dec 10 11:56:10 crc kubenswrapper[4682]: E1210 11:56:10.395536 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:56:19 crc kubenswrapper[4682]: E1210 11:56:19.385170 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:56:21 crc 
kubenswrapper[4682]: E1210 11:56:21.382961 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:56:31 crc kubenswrapper[4682]: E1210 11:56:31.383623 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:56:36 crc kubenswrapper[4682]: E1210 11:56:36.383028 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:56:45 crc kubenswrapper[4682]: E1210 11:56:45.382539 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:56:51 crc kubenswrapper[4682]: E1210 11:56:51.383700 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:56:56 crc kubenswrapper[4682]: E1210 11:56:56.382611 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:57:04 crc kubenswrapper[4682]: E1210 11:57:04.384574 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:57:07 crc kubenswrapper[4682]: E1210 11:57:07.384126 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:57:16 crc kubenswrapper[4682]: E1210 11:57:16.383209 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:57:19 crc kubenswrapper[4682]: E1210 11:57:19.383210 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:57:29 crc kubenswrapper[4682]: E1210 11:57:29.382553 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:57:30 crc kubenswrapper[4682]: E1210 11:57:30.393166 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:57:36 crc kubenswrapper[4682]: I1210 11:57:36.478976 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:57:36 crc kubenswrapper[4682]: I1210 11:57:36.479614 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:57:42 crc kubenswrapper[4682]: E1210 11:57:42.383770 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:57:43 crc kubenswrapper[4682]: E1210 11:57:43.383706 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:57:55 crc kubenswrapper[4682]: E1210 11:57:55.383859 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:57:56 crc kubenswrapper[4682]: I1210 11:57:56.384819 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 
11:57:56 crc kubenswrapper[4682]: E1210 11:57:56.522934 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:57:56 crc kubenswrapper[4682]: E1210 11:57:56.523005 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 11:57:56 crc kubenswrapper[4682]: E1210 11:57:56.523147 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,
} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:57:56 crc kubenswrapper[4682]: E1210 11:57:56.524345 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:58:06 crc kubenswrapper[4682]: I1210 11:58:06.478634 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:58:06 crc kubenswrapper[4682]: I1210 11:58:06.479233 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:58:09 crc kubenswrapper[4682]: E1210 11:58:09.384143 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:58:09 crc kubenswrapper[4682]: E1210 11:58:09.512946 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:58:09 crc kubenswrapper[4682]: E1210 11:58:09.513014 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 11:58:09 crc kubenswrapper[4682]: E1210 11:58:09.513173 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 11:58:09 crc kubenswrapper[4682]: E1210 11:58:09.515435 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:58:21 crc kubenswrapper[4682]: E1210 11:58:21.385372 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:58:21 crc kubenswrapper[4682]: E1210 11:58:21.385994 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:58:32 crc kubenswrapper[4682]: E1210 11:58:32.383127 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:58:35 crc kubenswrapper[4682]: E1210 11:58:35.383395 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:58:36 crc kubenswrapper[4682]: I1210 11:58:36.478887 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 11:58:36 crc kubenswrapper[4682]: I1210 11:58:36.479360 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 11:58:36 crc kubenswrapper[4682]: I1210 11:58:36.479442 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 11:58:36 crc kubenswrapper[4682]: I1210 11:58:36.480518 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 11:58:36 crc kubenswrapper[4682]: I1210 11:58:36.480632 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" gracePeriod=600 Dec 10 11:58:36 crc 
kubenswrapper[4682]: E1210 11:58:36.614736 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:58:37 crc kubenswrapper[4682]: I1210 11:58:37.393152 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" exitCode=0 Dec 10 11:58:37 crc kubenswrapper[4682]: I1210 11:58:37.393209 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49"} Dec 10 11:58:37 crc kubenswrapper[4682]: I1210 11:58:37.393245 4682 scope.go:117] "RemoveContainer" containerID="60c3fbe6230de68ee431d21fa11b812406e0934693ca871cb3512f4172af1fe3" Dec 10 11:58:37 crc kubenswrapper[4682]: I1210 11:58:37.396286 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 11:58:37 crc kubenswrapper[4682]: E1210 11:58:37.396911 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.688160 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rtvmd"] Dec 10 11:58:38 crc kubenswrapper[4682]: E1210 11:58:38.688973 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70acbcab-17e3-490c-8fef-dc1308d51820" containerName="extract-utilities" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.688989 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="70acbcab-17e3-490c-8fef-dc1308d51820" containerName="extract-utilities" Dec 10 11:58:38 crc kubenswrapper[4682]: E1210 11:58:38.689003 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c03fa782-3933-4182-9a3a-5b86059ed1f0" containerName="registry-server" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.689009 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c03fa782-3933-4182-9a3a-5b86059ed1f0" containerName="registry-server" Dec 10 11:58:38 crc kubenswrapper[4682]: E1210 11:58:38.689037 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70acbcab-17e3-490c-8fef-dc1308d51820" containerName="extract-content" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.689043 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="70acbcab-17e3-490c-8fef-dc1308d51820" containerName="extract-content" Dec 10 11:58:38 crc kubenswrapper[4682]: E1210 11:58:38.689064 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70acbcab-17e3-490c-8fef-dc1308d51820" containerName="registry-server" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.689070 4682 
state_mem.go:107] "Deleted CPUSet assignment" podUID="70acbcab-17e3-490c-8fef-dc1308d51820" containerName="registry-server" Dec 10 11:58:38 crc kubenswrapper[4682]: E1210 11:58:38.689085 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c03fa782-3933-4182-9a3a-5b86059ed1f0" containerName="extract-utilities" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.689090 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c03fa782-3933-4182-9a3a-5b86059ed1f0" containerName="extract-utilities" Dec 10 11:58:38 crc kubenswrapper[4682]: E1210 11:58:38.689108 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c03fa782-3933-4182-9a3a-5b86059ed1f0" containerName="extract-content" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.689113 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c03fa782-3933-4182-9a3a-5b86059ed1f0" containerName="extract-content" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.689338 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c03fa782-3933-4182-9a3a-5b86059ed1f0" containerName="registry-server" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.689362 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="70acbcab-17e3-490c-8fef-dc1308d51820" containerName="registry-server" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.691205 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.693235 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e780e89-e9fb-45c5-82a5-bde9ca521360-catalog-content\") pod \"community-operators-rtvmd\" (UID: \"0e780e89-e9fb-45c5-82a5-bde9ca521360\") " pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.693491 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79ljk\" (UniqueName: \"kubernetes.io/projected/0e780e89-e9fb-45c5-82a5-bde9ca521360-kube-api-access-79ljk\") pod \"community-operators-rtvmd\" (UID: \"0e780e89-e9fb-45c5-82a5-bde9ca521360\") " pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.693608 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e780e89-e9fb-45c5-82a5-bde9ca521360-utilities\") pod \"community-operators-rtvmd\" (UID: \"0e780e89-e9fb-45c5-82a5-bde9ca521360\") " pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.707183 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rtvmd"] Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.795693 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79ljk\" (UniqueName: \"kubernetes.io/projected/0e780e89-e9fb-45c5-82a5-bde9ca521360-kube-api-access-79ljk\") pod \"community-operators-rtvmd\" (UID: \"0e780e89-e9fb-45c5-82a5-bde9ca521360\") " pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.796007 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/0e780e89-e9fb-45c5-82a5-bde9ca521360-utilities\") pod \"community-operators-rtvmd\" (UID: \"0e780e89-e9fb-45c5-82a5-bde9ca521360\") " pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.796101 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e780e89-e9fb-45c5-82a5-bde9ca521360-catalog-content\") pod \"community-operators-rtvmd\" (UID: \"0e780e89-e9fb-45c5-82a5-bde9ca521360\") " pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.796586 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e780e89-e9fb-45c5-82a5-bde9ca521360-utilities\") pod \"community-operators-rtvmd\" (UID: \"0e780e89-e9fb-45c5-82a5-bde9ca521360\") " pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.796620 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e780e89-e9fb-45c5-82a5-bde9ca521360-catalog-content\") pod \"community-operators-rtvmd\" (UID: \"0e780e89-e9fb-45c5-82a5-bde9ca521360\") " pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:38 crc kubenswrapper[4682]: I1210 11:58:38.816080 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79ljk\" (UniqueName: \"kubernetes.io/projected/0e780e89-e9fb-45c5-82a5-bde9ca521360-kube-api-access-79ljk\") pod \"community-operators-rtvmd\" (UID: \"0e780e89-e9fb-45c5-82a5-bde9ca521360\") " pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:39 crc kubenswrapper[4682]: I1210 11:58:39.035175 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:39 crc kubenswrapper[4682]: I1210 11:58:39.561017 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rtvmd"] Dec 10 11:58:39 crc kubenswrapper[4682]: W1210 11:58:39.566903 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e780e89_e9fb_45c5_82a5_bde9ca521360.slice/crio-67e427253b98fb2022700e5e2a184517c19dbd34a012a0d8fc3dd3010c1b7859 WatchSource:0}: Error finding container 67e427253b98fb2022700e5e2a184517c19dbd34a012a0d8fc3dd3010c1b7859: Status 404 returned error can't find the container with id 67e427253b98fb2022700e5e2a184517c19dbd34a012a0d8fc3dd3010c1b7859 Dec 10 11:58:40 crc kubenswrapper[4682]: I1210 11:58:40.425646 4682 generic.go:334] "Generic (PLEG): container finished" podID="0e780e89-e9fb-45c5-82a5-bde9ca521360" containerID="e3c081b53eb3bbcf4e8fbf1a1e3d6dbde6e35efcb04d69e248c1cf1a3f14eb96" exitCode=0 Dec 10 11:58:40 crc kubenswrapper[4682]: I1210 11:58:40.425709 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtvmd" event={"ID":"0e780e89-e9fb-45c5-82a5-bde9ca521360","Type":"ContainerDied","Data":"e3c081b53eb3bbcf4e8fbf1a1e3d6dbde6e35efcb04d69e248c1cf1a3f14eb96"} Dec 10 11:58:40 crc kubenswrapper[4682]: I1210 11:58:40.425927 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtvmd" event={"ID":"0e780e89-e9fb-45c5-82a5-bde9ca521360","Type":"ContainerStarted","Data":"67e427253b98fb2022700e5e2a184517c19dbd34a012a0d8fc3dd3010c1b7859"} Dec 10 11:58:42 crc kubenswrapper[4682]: I1210 11:58:42.451990 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtvmd" event={"ID":"0e780e89-e9fb-45c5-82a5-bde9ca521360","Type":"ContainerStarted","Data":"89b74336c4c2af7b86a59f9c2560241a8fdd9e6da0ecfc6f3ef5397374c8bf6a"} Dec 10 11:58:43 crc kubenswrapper[4682]: I1210 11:58:43.463718 4682 generic.go:334] "Generic (PLEG): container finished" podID="0e780e89-e9fb-45c5-82a5-bde9ca521360" containerID="89b74336c4c2af7b86a59f9c2560241a8fdd9e6da0ecfc6f3ef5397374c8bf6a" exitCode=0 Dec 10 11:58:43 crc kubenswrapper[4682]: I1210 11:58:43.463769 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtvmd" event={"ID":"0e780e89-e9fb-45c5-82a5-bde9ca521360","Type":"ContainerDied","Data":"89b74336c4c2af7b86a59f9c2560241a8fdd9e6da0ecfc6f3ef5397374c8bf6a"} Dec 10 11:58:45 crc kubenswrapper[4682]: I1210 11:58:45.485144 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtvmd" event={"ID":"0e780e89-e9fb-45c5-82a5-bde9ca521360","Type":"ContainerStarted","Data":"e606fbe474781097b1bc4c4002e74e87453aa9713b6240fe3fe3816a92ebe19b"} Dec 10 11:58:45 crc kubenswrapper[4682]: I1210 11:58:45.504970 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rtvmd" podStartSLOduration=3.615308163 podStartE2EDuration="7.504952948s" podCreationTimestamp="2025-12-10 11:58:38 +0000 UTC" firstStartedPulling="2025-12-10 11:58:40.429459662 +0000 UTC m=+4400.749670422" lastFinishedPulling="2025-12-10 11:58:44.319104457 +0000 UTC m=+4404.639315207" observedRunningTime="2025-12-10 11:58:45.502450599 +0000 UTC m=+4405.822661369" watchObservedRunningTime="2025-12-10 11:58:45.504952948 +0000 UTC m=+4405.825163698" 
Dec 10 11:58:46 crc kubenswrapper[4682]: E1210 11:58:46.382669 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:58:49 crc kubenswrapper[4682]: I1210 11:58:49.036167 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:49 crc kubenswrapper[4682]: I1210 11:58:49.036564 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:49 crc kubenswrapper[4682]: I1210 11:58:49.082134 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:49 crc kubenswrapper[4682]: I1210 11:58:49.578484 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:49 crc kubenswrapper[4682]: I1210 11:58:49.625865 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rtvmd"] Dec 10 11:58:50 crc kubenswrapper[4682]: E1210 11:58:50.390463 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:58:51 crc kubenswrapper[4682]: I1210 11:58:51.541664 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rtvmd" podUID="0e780e89-e9fb-45c5-82a5-bde9ca521360" containerName="registry-server" containerID="cri-o://e606fbe474781097b1bc4c4002e74e87453aa9713b6240fe3fe3816a92ebe19b" gracePeriod=2 Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.083165 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.106794 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e780e89-e9fb-45c5-82a5-bde9ca521360-utilities\") pod \"0e780e89-e9fb-45c5-82a5-bde9ca521360\" (UID: \"0e780e89-e9fb-45c5-82a5-bde9ca521360\") " Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.106895 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e780e89-e9fb-45c5-82a5-bde9ca521360-catalog-content\") pod \"0e780e89-e9fb-45c5-82a5-bde9ca521360\" (UID: \"0e780e89-e9fb-45c5-82a5-bde9ca521360\") " Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.107032 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79ljk\" (UniqueName: \"kubernetes.io/projected/0e780e89-e9fb-45c5-82a5-bde9ca521360-kube-api-access-79ljk\") pod \"0e780e89-e9fb-45c5-82a5-bde9ca521360\" (UID: \"0e780e89-e9fb-45c5-82a5-bde9ca521360\") " Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.108793 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e780e89-e9fb-45c5-82a5-bde9ca521360-utilities" (OuterVolumeSpecName: "utilities") pod "0e780e89-e9fb-45c5-82a5-bde9ca521360" (UID: "0e780e89-e9fb-45c5-82a5-bde9ca521360"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.120873 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e780e89-e9fb-45c5-82a5-bde9ca521360-kube-api-access-79ljk" (OuterVolumeSpecName: "kube-api-access-79ljk") pod "0e780e89-e9fb-45c5-82a5-bde9ca521360" (UID: "0e780e89-e9fb-45c5-82a5-bde9ca521360"). InnerVolumeSpecName "kube-api-access-79ljk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.187158 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e780e89-e9fb-45c5-82a5-bde9ca521360-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0e780e89-e9fb-45c5-82a5-bde9ca521360" (UID: "0e780e89-e9fb-45c5-82a5-bde9ca521360"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.209488 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e780e89-e9fb-45c5-82a5-bde9ca521360-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.209524 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e780e89-e9fb-45c5-82a5-bde9ca521360-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.209541 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79ljk\" (UniqueName: \"kubernetes.io/projected/0e780e89-e9fb-45c5-82a5-bde9ca521360-kube-api-access-79ljk\") on node \"crc\" DevicePath \"\"" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.380959 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 11:58:52 crc kubenswrapper[4682]: E1210 11:58:52.381578 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.553697 4682 generic.go:334] "Generic (PLEG): container finished" podID="0e780e89-e9fb-45c5-82a5-bde9ca521360" containerID="e606fbe474781097b1bc4c4002e74e87453aa9713b6240fe3fe3816a92ebe19b" exitCode=0 Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.553750 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtvmd" event={"ID":"0e780e89-e9fb-45c5-82a5-bde9ca521360","Type":"ContainerDied","Data":"e606fbe474781097b1bc4c4002e74e87453aa9713b6240fe3fe3816a92ebe19b"} Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.553802 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtvmd" event={"ID":"0e780e89-e9fb-45c5-82a5-bde9ca521360","Type":"ContainerDied","Data":"67e427253b98fb2022700e5e2a184517c19dbd34a012a0d8fc3dd3010c1b7859"} Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.553798 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rtvmd" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.553823 4682 scope.go:117] "RemoveContainer" containerID="e606fbe474781097b1bc4c4002e74e87453aa9713b6240fe3fe3816a92ebe19b" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.583022 4682 scope.go:117] "RemoveContainer" containerID="89b74336c4c2af7b86a59f9c2560241a8fdd9e6da0ecfc6f3ef5397374c8bf6a" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.585777 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rtvmd"] Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.596071 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rtvmd"] Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.608379 4682 scope.go:117] "RemoveContainer" containerID="e3c081b53eb3bbcf4e8fbf1a1e3d6dbde6e35efcb04d69e248c1cf1a3f14eb96" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.670741 4682 scope.go:117] "RemoveContainer" containerID="e606fbe474781097b1bc4c4002e74e87453aa9713b6240fe3fe3816a92ebe19b" Dec 10 11:58:52 crc kubenswrapper[4682]: E1210 11:58:52.671419 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e606fbe474781097b1bc4c4002e74e87453aa9713b6240fe3fe3816a92ebe19b\": container with ID starting with e606fbe474781097b1bc4c4002e74e87453aa9713b6240fe3fe3816a92ebe19b not found: ID does not exist" containerID="e606fbe474781097b1bc4c4002e74e87453aa9713b6240fe3fe3816a92ebe19b" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.671453 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e606fbe474781097b1bc4c4002e74e87453aa9713b6240fe3fe3816a92ebe19b"} err="failed to get container status \"e606fbe474781097b1bc4c4002e74e87453aa9713b6240fe3fe3816a92ebe19b\": rpc error: code = NotFound desc = could not find container \"e606fbe474781097b1bc4c4002e74e87453aa9713b6240fe3fe3816a92ebe19b\": container with ID starting with e606fbe474781097b1bc4c4002e74e87453aa9713b6240fe3fe3816a92ebe19b not found: ID does not exist" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.671544 4682 scope.go:117] "RemoveContainer" containerID="89b74336c4c2af7b86a59f9c2560241a8fdd9e6da0ecfc6f3ef5397374c8bf6a" Dec 10 11:58:52 crc kubenswrapper[4682]: E1210 11:58:52.671890 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89b74336c4c2af7b86a59f9c2560241a8fdd9e6da0ecfc6f3ef5397374c8bf6a\": container with ID starting with 89b74336c4c2af7b86a59f9c2560241a8fdd9e6da0ecfc6f3ef5397374c8bf6a not found: ID does not exist" containerID="89b74336c4c2af7b86a59f9c2560241a8fdd9e6da0ecfc6f3ef5397374c8bf6a" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.672006 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89b74336c4c2af7b86a59f9c2560241a8fdd9e6da0ecfc6f3ef5397374c8bf6a"} err="failed to get container status \"89b74336c4c2af7b86a59f9c2560241a8fdd9e6da0ecfc6f3ef5397374c8bf6a\": rpc error: code = NotFound desc = could not find container \"89b74336c4c2af7b86a59f9c2560241a8fdd9e6da0ecfc6f3ef5397374c8bf6a\": container with ID starting with 89b74336c4c2af7b86a59f9c2560241a8fdd9e6da0ecfc6f3ef5397374c8bf6a not found: ID does not exist" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.672095 4682 scope.go:117] "RemoveContainer" 
containerID="e3c081b53eb3bbcf4e8fbf1a1e3d6dbde6e35efcb04d69e248c1cf1a3f14eb96" Dec 10 11:58:52 crc kubenswrapper[4682]: E1210 11:58:52.672495 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3c081b53eb3bbcf4e8fbf1a1e3d6dbde6e35efcb04d69e248c1cf1a3f14eb96\": container with ID starting with e3c081b53eb3bbcf4e8fbf1a1e3d6dbde6e35efcb04d69e248c1cf1a3f14eb96 not found: ID does not exist" containerID="e3c081b53eb3bbcf4e8fbf1a1e3d6dbde6e35efcb04d69e248c1cf1a3f14eb96" Dec 10 11:58:52 crc kubenswrapper[4682]: I1210 11:58:52.672614 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3c081b53eb3bbcf4e8fbf1a1e3d6dbde6e35efcb04d69e248c1cf1a3f14eb96"} err="failed to get container status \"e3c081b53eb3bbcf4e8fbf1a1e3d6dbde6e35efcb04d69e248c1cf1a3f14eb96\": rpc error: code = NotFound desc = could not find container \"e3c081b53eb3bbcf4e8fbf1a1e3d6dbde6e35efcb04d69e248c1cf1a3f14eb96\": container with ID starting with e3c081b53eb3bbcf4e8fbf1a1e3d6dbde6e35efcb04d69e248c1cf1a3f14eb96 not found: ID does not exist" Dec 10 11:58:54 crc kubenswrapper[4682]: I1210 11:58:54.392639 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e780e89-e9fb-45c5-82a5-bde9ca521360" path="/var/lib/kubelet/pods/0e780e89-e9fb-45c5-82a5-bde9ca521360/volumes" Dec 10 11:59:01 crc kubenswrapper[4682]: E1210 11:59:01.383709 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:59:03 crc kubenswrapper[4682]: I1210 11:59:03.381177 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 11:59:03 crc kubenswrapper[4682]: E1210 11:59:03.382495 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:59:05 crc kubenswrapper[4682]: E1210 11:59:05.383266 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:59:16 crc kubenswrapper[4682]: E1210 11:59:16.386221 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:59:16 crc kubenswrapper[4682]: E1210 11:59:16.386239 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:59:18 crc kubenswrapper[4682]: I1210 11:59:18.382538 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 11:59:18 crc kubenswrapper[4682]: E1210 11:59:18.382972 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:59:27 crc kubenswrapper[4682]: E1210 11:59:27.382511 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:59:31 crc kubenswrapper[4682]: E1210 11:59:31.384718 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:59:32 crc kubenswrapper[4682]: I1210 11:59:32.381683 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 11:59:32 crc kubenswrapper[4682]: E1210 11:59:32.382878 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:59:41 crc kubenswrapper[4682]: E1210 11:59:41.383696 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:59:43 crc kubenswrapper[4682]: E1210 11:59:43.384046 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:59:44 crc kubenswrapper[4682]: I1210 11:59:44.382005 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 11:59:44 crc kubenswrapper[4682]: E1210 11:59:44.382443 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 11:59:50 crc kubenswrapper[4682]: I1210 11:59:50.092796 4682 generic.go:334] "Generic (PLEG): container finished" podID="8d02d5aa-758d-49b4-aa9e-77062c9af129" containerID="02d90643b99baac2d4922d2e4c9943a48219e3610b3fadaec1a34383a4e13946" exitCode=2 Dec 10 11:59:50 crc kubenswrapper[4682]: I1210 11:59:50.092897 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" event={"ID":"8d02d5aa-758d-49b4-aa9e-77062c9af129","Type":"ContainerDied","Data":"02d90643b99baac2d4922d2e4c9943a48219e3610b3fadaec1a34383a4e13946"} Dec 10 11:59:51 crc kubenswrapper[4682]: I1210 11:59:51.579743 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" Dec 10 11:59:51 crc kubenswrapper[4682]: I1210 11:59:51.713982 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9hg5n\" (UniqueName: \"kubernetes.io/projected/8d02d5aa-758d-49b4-aa9e-77062c9af129-kube-api-access-9hg5n\") pod \"8d02d5aa-758d-49b4-aa9e-77062c9af129\" (UID: \"8d02d5aa-758d-49b4-aa9e-77062c9af129\") " Dec 10 11:59:51 crc kubenswrapper[4682]: I1210 11:59:51.714018 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d02d5aa-758d-49b4-aa9e-77062c9af129-ssh-key\") pod \"8d02d5aa-758d-49b4-aa9e-77062c9af129\" (UID: \"8d02d5aa-758d-49b4-aa9e-77062c9af129\") " Dec 10 11:59:51 crc kubenswrapper[4682]: I1210 11:59:51.714155 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d02d5aa-758d-49b4-aa9e-77062c9af129-inventory\") pod \"8d02d5aa-758d-49b4-aa9e-77062c9af129\" (UID: \"8d02d5aa-758d-49b4-aa9e-77062c9af129\") " Dec 10 11:59:51 crc kubenswrapper[4682]: I1210 11:59:51.731846 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d02d5aa-758d-49b4-aa9e-77062c9af129-kube-api-access-9hg5n" (OuterVolumeSpecName: "kube-api-access-9hg5n") pod "8d02d5aa-758d-49b4-aa9e-77062c9af129" (UID: "8d02d5aa-758d-49b4-aa9e-77062c9af129"). InnerVolumeSpecName "kube-api-access-9hg5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 11:59:51 crc kubenswrapper[4682]: I1210 11:59:51.743162 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d02d5aa-758d-49b4-aa9e-77062c9af129-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8d02d5aa-758d-49b4-aa9e-77062c9af129" (UID: "8d02d5aa-758d-49b4-aa9e-77062c9af129"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:59:51 crc kubenswrapper[4682]: I1210 11:59:51.745031 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d02d5aa-758d-49b4-aa9e-77062c9af129-inventory" (OuterVolumeSpecName: "inventory") pod "8d02d5aa-758d-49b4-aa9e-77062c9af129" (UID: "8d02d5aa-758d-49b4-aa9e-77062c9af129"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 11:59:51 crc kubenswrapper[4682]: I1210 11:59:51.817256 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9hg5n\" (UniqueName: \"kubernetes.io/projected/8d02d5aa-758d-49b4-aa9e-77062c9af129-kube-api-access-9hg5n\") on node \"crc\" DevicePath \"\"" Dec 10 11:59:51 crc kubenswrapper[4682]: I1210 11:59:51.817292 4682 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d02d5aa-758d-49b4-aa9e-77062c9af129-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 11:59:51 crc kubenswrapper[4682]: I1210 11:59:51.817307 4682 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d02d5aa-758d-49b4-aa9e-77062c9af129-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 11:59:52 crc kubenswrapper[4682]: I1210 11:59:52.123031 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" event={"ID":"8d02d5aa-758d-49b4-aa9e-77062c9af129","Type":"ContainerDied","Data":"0c2bf19e16cd77b9c33743201b717217c98ad5ec919d0e9e659cf4095385e98b"} Dec 10 11:59:52 crc kubenswrapper[4682]: I1210 11:59:52.123079 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c2bf19e16cd77b9c33743201b717217c98ad5ec919d0e9e659cf4095385e98b" Dec 10 11:59:52 crc kubenswrapper[4682]: I1210 11:59:52.123149 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-smtgq" Dec 10 11:59:54 crc kubenswrapper[4682]: E1210 11:59:54.383523 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 11:59:55 crc kubenswrapper[4682]: E1210 11:59:55.385191 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 11:59:56 crc kubenswrapper[4682]: I1210 11:59:56.381265 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 11:59:56 crc kubenswrapper[4682]: E1210 11:59:56.381915 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.185998 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp"] Dec 10 12:00:00 crc kubenswrapper[4682]: E1210 12:00:00.186827 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d02d5aa-758d-49b4-aa9e-77062c9af129" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 
12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.186846 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d02d5aa-758d-49b4-aa9e-77062c9af129" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 12:00:00 crc kubenswrapper[4682]: E1210 12:00:00.186865 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e780e89-e9fb-45c5-82a5-bde9ca521360" containerName="extract-utilities" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.186872 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e780e89-e9fb-45c5-82a5-bde9ca521360" containerName="extract-utilities" Dec 10 12:00:00 crc kubenswrapper[4682]: E1210 12:00:00.186912 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e780e89-e9fb-45c5-82a5-bde9ca521360" containerName="registry-server" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.186919 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e780e89-e9fb-45c5-82a5-bde9ca521360" containerName="registry-server" Dec 10 12:00:00 crc kubenswrapper[4682]: E1210 12:00:00.186934 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e780e89-e9fb-45c5-82a5-bde9ca521360" containerName="extract-content" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.186942 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e780e89-e9fb-45c5-82a5-bde9ca521360" containerName="extract-content" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.187182 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e780e89-e9fb-45c5-82a5-bde9ca521360" containerName="registry-server" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.187208 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d02d5aa-758d-49b4-aa9e-77062c9af129" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.188072 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.190454 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.191978 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.198633 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp"] Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.296017 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4lg8\" (UniqueName: \"kubernetes.io/projected/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-kube-api-access-l4lg8\") pod \"collect-profiles-29422800-wljwp\" (UID: \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.296648 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-config-volume\") pod \"collect-profiles-29422800-wljwp\" (UID: \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.297417 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-secret-volume\") pod \"collect-profiles-29422800-wljwp\" (UID: \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.399522 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-secret-volume\") pod \"collect-profiles-29422800-wljwp\" (UID: \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.399926 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4lg8\" (UniqueName: \"kubernetes.io/projected/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-kube-api-access-l4lg8\") pod \"collect-profiles-29422800-wljwp\" (UID: \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.400003 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-config-volume\") pod \"collect-profiles-29422800-wljwp\" (UID: \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.401065 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-config-volume\") pod 
\"collect-profiles-29422800-wljwp\" (UID: \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.405509 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-secret-volume\") pod \"collect-profiles-29422800-wljwp\" (UID: \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.416921 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4lg8\" (UniqueName: \"kubernetes.io/projected/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-kube-api-access-l4lg8\") pod \"collect-profiles-29422800-wljwp\" (UID: \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" Dec 10 12:00:00 crc kubenswrapper[4682]: I1210 12:00:00.517716 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" Dec 10 12:00:01 crc kubenswrapper[4682]: I1210 12:00:01.051687 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp"] Dec 10 12:00:01 crc kubenswrapper[4682]: I1210 12:00:01.210572 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" event={"ID":"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78","Type":"ContainerStarted","Data":"268c1a45d7324d5537059eb7d1fe1b716f8e6336f4a35487de9e08671036563d"} Dec 10 12:00:02 crc kubenswrapper[4682]: I1210 12:00:02.226101 4682 generic.go:334] "Generic (PLEG): container finished" podID="95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78" containerID="791ceb30d83d1ad9a4885705fa51c19daac6606e6803d8d28553aed9d1691138" exitCode=0 Dec 10 12:00:02 crc kubenswrapper[4682]: I1210 12:00:02.226160 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" event={"ID":"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78","Type":"ContainerDied","Data":"791ceb30d83d1ad9a4885705fa51c19daac6606e6803d8d28553aed9d1691138"} Dec 10 12:00:03 crc kubenswrapper[4682]: I1210 12:00:03.665721 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" Dec 10 12:00:03 crc kubenswrapper[4682]: I1210 12:00:03.775806 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-config-volume\") pod \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\" (UID: \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\") " Dec 10 12:00:03 crc kubenswrapper[4682]: I1210 12:00:03.776297 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-secret-volume\") pod \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\" (UID: \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\") " Dec 10 12:00:03 crc kubenswrapper[4682]: I1210 12:00:03.776409 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4lg8\" (UniqueName: \"kubernetes.io/projected/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-kube-api-access-l4lg8\") pod \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\" (UID: \"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78\") " Dec 10 12:00:03 crc kubenswrapper[4682]: I1210 12:00:03.776728 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-config-volume" (OuterVolumeSpecName: "config-volume") pod "95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78" (UID: "95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 12:00:03 crc kubenswrapper[4682]: I1210 12:00:03.777210 4682 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 12:00:03 crc kubenswrapper[4682]: I1210 12:00:03.781719 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-kube-api-access-l4lg8" (OuterVolumeSpecName: "kube-api-access-l4lg8") pod "95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78" (UID: "95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78"). InnerVolumeSpecName "kube-api-access-l4lg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:00:03 crc kubenswrapper[4682]: I1210 12:00:03.782348 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78" (UID: "95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:00:03 crc kubenswrapper[4682]: I1210 12:00:03.879592 4682 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 12:00:03 crc kubenswrapper[4682]: I1210 12:00:03.879631 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4lg8\" (UniqueName: \"kubernetes.io/projected/95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78-kube-api-access-l4lg8\") on node \"crc\" DevicePath \"\"" Dec 10 12:00:04 crc kubenswrapper[4682]: I1210 12:00:04.251537 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" event={"ID":"95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78","Type":"ContainerDied","Data":"268c1a45d7324d5537059eb7d1fe1b716f8e6336f4a35487de9e08671036563d"} Dec 10 12:00:04 crc kubenswrapper[4682]: I1210 12:00:04.251875 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="268c1a45d7324d5537059eb7d1fe1b716f8e6336f4a35487de9e08671036563d" Dec 10 12:00:04 crc kubenswrapper[4682]: I1210 12:00:04.251734 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422800-wljwp" Dec 10 12:00:04 crc kubenswrapper[4682]: I1210 12:00:04.544172 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6"] Dec 10 12:00:04 crc kubenswrapper[4682]: I1210 12:00:04.552080 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422755-p9dh6"] Dec 10 12:00:06 crc kubenswrapper[4682]: E1210 12:00:06.382872 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:00:06 crc kubenswrapper[4682]: I1210 12:00:06.395938 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e" path="/var/lib/kubelet/pods/8650823f-d7b3-4d4f-b3e7-b7e92e4ed91e/volumes" Dec 10 12:00:07 crc kubenswrapper[4682]: E1210 12:00:07.382220 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:00:09 crc kubenswrapper[4682]: I1210 12:00:09.381641 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:00:09 crc kubenswrapper[4682]: E1210 12:00:09.382393 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:00:18 crc kubenswrapper[4682]: E1210 
12:00:18.385360 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:00:21 crc kubenswrapper[4682]: E1210 12:00:21.384598 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:00:23 crc kubenswrapper[4682]: I1210 12:00:23.381548 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:00:23 crc kubenswrapper[4682]: E1210 12:00:23.382602 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:00:32 crc kubenswrapper[4682]: E1210 12:00:32.387907 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:00:33 crc kubenswrapper[4682]: E1210 12:00:33.382313 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:00:36 crc kubenswrapper[4682]: I1210 12:00:36.004674 4682 scope.go:117] "RemoveContainer" containerID="91fcf5bb8900f93cf03724fa918cacfd36601d4841d1a4e73e322d60080f6b04" Dec 10 12:00:36 crc kubenswrapper[4682]: I1210 12:00:36.380880 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:00:36 crc kubenswrapper[4682]: E1210 12:00:36.381411 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:00:47 crc kubenswrapper[4682]: E1210 12:00:47.383569 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:00:47 crc 
kubenswrapper[4682]: E1210 12:00:47.383840 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:00:49 crc kubenswrapper[4682]: I1210 12:00:49.381927 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:00:49 crc kubenswrapper[4682]: E1210 12:00:49.382583 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:00:58 crc kubenswrapper[4682]: E1210 12:00:58.385599 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.163101 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29422801-lnk8m"] Dec 10 12:01:00 crc kubenswrapper[4682]: E1210 12:01:00.163832 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78" containerName="collect-profiles" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.163845 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78" containerName="collect-profiles" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.164073 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="95f03e61-5ef0-4a99-b39e-bfa5bbbf0e78" containerName="collect-profiles" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.164799 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.347599 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-combined-ca-bundle\") pod \"keystone-cron-29422801-lnk8m\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.347656 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-config-data\") pod \"keystone-cron-29422801-lnk8m\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.347896 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-fernet-keys\") pod \"keystone-cron-29422801-lnk8m\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.348110 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjg4q\" (UniqueName: \"kubernetes.io/projected/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-kube-api-access-wjg4q\") pod \"keystone-cron-29422801-lnk8m\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.450513 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-combined-ca-bundle\") pod \"keystone-cron-29422801-lnk8m\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.450557 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-config-data\") pod \"keystone-cron-29422801-lnk8m\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.450646 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-fernet-keys\") pod \"keystone-cron-29422801-lnk8m\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.450684 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjg4q\" (UniqueName: \"kubernetes.io/projected/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-kube-api-access-wjg4q\") pod \"keystone-cron-29422801-lnk8m\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.457793 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-combined-ca-bundle\") pod \"keystone-cron-29422801-lnk8m\" 
(UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.457851 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-config-data\") pod \"keystone-cron-29422801-lnk8m\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.458203 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-fernet-keys\") pod \"keystone-cron-29422801-lnk8m\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.727829 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjg4q\" (UniqueName: \"kubernetes.io/projected/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-kube-api-access-wjg4q\") pod \"keystone-cron-29422801-lnk8m\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.746304 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29422801-lnk8m"] Dec 10 12:01:00 crc kubenswrapper[4682]: I1210 12:01:00.786715 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:01 crc kubenswrapper[4682]: I1210 12:01:01.270106 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29422801-lnk8m"] Dec 10 12:01:01 crc kubenswrapper[4682]: I1210 12:01:01.796424 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422801-lnk8m" event={"ID":"da98ce99-0e03-465a-9d86-0e3cb7fbcb59","Type":"ContainerStarted","Data":"27c156b1ef0d96a1644103d360f09ba53f98ea787b6102dac63b73acf5a27b88"} Dec 10 12:01:02 crc kubenswrapper[4682]: E1210 12:01:02.382091 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:01:02 crc kubenswrapper[4682]: I1210 12:01:02.805530 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422801-lnk8m" event={"ID":"da98ce99-0e03-465a-9d86-0e3cb7fbcb59","Type":"ContainerStarted","Data":"851234808dacb5209a6862f58dcae58522e47dfc27d867afb7ae89a64b60fae6"} Dec 10 12:01:02 crc kubenswrapper[4682]: I1210 12:01:02.830728 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29422801-lnk8m" podStartSLOduration=2.830709861 podStartE2EDuration="2.830709861s" podCreationTimestamp="2025-12-10 12:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 12:01:02.820368527 +0000 UTC m=+4543.140579337" watchObservedRunningTime="2025-12-10 12:01:02.830709861 +0000 UTC m=+4543.150920601" Dec 10 12:01:03 crc kubenswrapper[4682]: I1210 12:01:03.385301 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:01:03 
crc kubenswrapper[4682]: E1210 12:01:03.385680 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:01:04 crc kubenswrapper[4682]: I1210 12:01:04.825044 4682 generic.go:334] "Generic (PLEG): container finished" podID="da98ce99-0e03-465a-9d86-0e3cb7fbcb59" containerID="851234808dacb5209a6862f58dcae58522e47dfc27d867afb7ae89a64b60fae6" exitCode=0 Dec 10 12:01:04 crc kubenswrapper[4682]: I1210 12:01:04.825107 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422801-lnk8m" event={"ID":"da98ce99-0e03-465a-9d86-0e3cb7fbcb59","Type":"ContainerDied","Data":"851234808dacb5209a6862f58dcae58522e47dfc27d867afb7ae89a64b60fae6"} Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.293806 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.486257 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjg4q\" (UniqueName: \"kubernetes.io/projected/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-kube-api-access-wjg4q\") pod \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.486418 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-config-data\") pod \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.486530 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-combined-ca-bundle\") pod \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.486589 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-fernet-keys\") pod \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\" (UID: \"da98ce99-0e03-465a-9d86-0e3cb7fbcb59\") " Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.492484 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-kube-api-access-wjg4q" (OuterVolumeSpecName: "kube-api-access-wjg4q") pod "da98ce99-0e03-465a-9d86-0e3cb7fbcb59" (UID: "da98ce99-0e03-465a-9d86-0e3cb7fbcb59"). InnerVolumeSpecName "kube-api-access-wjg4q". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.494129 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "da98ce99-0e03-465a-9d86-0e3cb7fbcb59" (UID: "da98ce99-0e03-465a-9d86-0e3cb7fbcb59"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.515623 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "da98ce99-0e03-465a-9d86-0e3cb7fbcb59" (UID: "da98ce99-0e03-465a-9d86-0e3cb7fbcb59"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.541082 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-config-data" (OuterVolumeSpecName: "config-data") pod "da98ce99-0e03-465a-9d86-0e3cb7fbcb59" (UID: "da98ce99-0e03-465a-9d86-0e3cb7fbcb59"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.589154 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjg4q\" (UniqueName: \"kubernetes.io/projected/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-kube-api-access-wjg4q\") on node \"crc\" DevicePath \"\"" Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.589197 4682 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.589211 4682 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.589223 4682 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/da98ce99-0e03-465a-9d86-0e3cb7fbcb59-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.850007 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422801-lnk8m" event={"ID":"da98ce99-0e03-465a-9d86-0e3cb7fbcb59","Type":"ContainerDied","Data":"27c156b1ef0d96a1644103d360f09ba53f98ea787b6102dac63b73acf5a27b88"} Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.850063 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27c156b1ef0d96a1644103d360f09ba53f98ea787b6102dac63b73acf5a27b88" Dec 10 12:01:06 crc kubenswrapper[4682]: I1210 12:01:06.850118 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29422801-lnk8m" Dec 10 12:01:13 crc kubenswrapper[4682]: E1210 12:01:13.382597 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:01:14 crc kubenswrapper[4682]: E1210 12:01:14.389702 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:01:18 crc kubenswrapper[4682]: I1210 12:01:18.381394 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:01:18 crc kubenswrapper[4682]: E1210 12:01:18.382288 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:01:26 crc kubenswrapper[4682]: E1210 12:01:26.383063 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:01:27 crc kubenswrapper[4682]: E1210 12:01:27.383125 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:01:33 crc kubenswrapper[4682]: I1210 12:01:33.381103 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:01:33 crc kubenswrapper[4682]: E1210 12:01:33.381963 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:01:39 crc kubenswrapper[4682]: E1210 12:01:39.382979 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:01:40 crc kubenswrapper[4682]: E1210 12:01:40.388201 4682 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:01:48 crc kubenswrapper[4682]: I1210 12:01:48.381667 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:01:48 crc kubenswrapper[4682]: E1210 12:01:48.383328 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:01:51 crc kubenswrapper[4682]: E1210 12:01:51.384788 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:01:55 crc kubenswrapper[4682]: E1210 12:01:55.383025 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:01:59 crc kubenswrapper[4682]: I1210 12:01:59.380729 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:01:59 crc kubenswrapper[4682]: E1210 12:01:59.381680 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:02:04 crc kubenswrapper[4682]: E1210 12:02:04.386463 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:02:09 crc kubenswrapper[4682]: E1210 12:02:09.383415 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:02:13 crc kubenswrapper[4682]: I1210 12:02:13.381528 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:02:13 crc kubenswrapper[4682]: E1210 12:02:13.382301 4682 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:02:16 crc kubenswrapper[4682]: E1210 12:02:16.384851 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:02:21 crc kubenswrapper[4682]: E1210 12:02:21.383002 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:02:28 crc kubenswrapper[4682]: I1210 12:02:28.382720 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:02:28 crc kubenswrapper[4682]: E1210 12:02:28.383663 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:02:28 crc kubenswrapper[4682]: E1210 12:02:28.384773 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:02:35 crc kubenswrapper[4682]: E1210 12:02:35.383803 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:02:41 crc kubenswrapper[4682]: I1210 12:02:41.382759 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:02:41 crc kubenswrapper[4682]: E1210 12:02:41.383705 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:02:42 crc kubenswrapper[4682]: E1210 12:02:42.384705 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:02:48 crc kubenswrapper[4682]: E1210 12:02:48.386000 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:02:56 crc kubenswrapper[4682]: I1210 12:02:56.382382 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:02:56 crc kubenswrapper[4682]: E1210 12:02:56.383564 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:02:56 crc kubenswrapper[4682]: E1210 12:02:56.385233 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:03:02 crc kubenswrapper[4682]: E1210 12:03:02.384644 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:03:07 crc kubenswrapper[4682]: I1210 12:03:07.380874 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:03:07 crc kubenswrapper[4682]: E1210 12:03:07.381642 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:03:07 crc kubenswrapper[4682]: I1210 12:03:07.383282 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 12:03:07 crc kubenswrapper[4682]: E1210 12:03:07.509822 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 12:03:07 crc kubenswrapper[4682]: E1210 12:03:07.509900 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 12:03:07 crc kubenswrapper[4682]: E1210 12:03:07.510083 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 12:03:07 crc kubenswrapper[4682]: E1210 12:03:07.511627 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:03:17 crc kubenswrapper[4682]: E1210 12:03:17.571678 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:03:17 crc kubenswrapper[4682]: E1210 12:03:17.572206 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:03:17 crc kubenswrapper[4682]: E1210 12:03:17.572334 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:03:17 crc kubenswrapper[4682]: E1210 12:03:17.573548 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:03:19 crc kubenswrapper[4682]: I1210 12:03:19.382196 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:03:19 crc kubenswrapper[4682]: E1210 12:03:19.384303 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:03:21 crc kubenswrapper[4682]: E1210 12:03:21.383678 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:03:28 crc kubenswrapper[4682]: E1210 12:03:28.386313 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:03:33 crc kubenswrapper[4682]: I1210 12:03:33.380660 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:03:33 crc 
kubenswrapper[4682]: E1210 12:03:33.381825 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:03:33 crc kubenswrapper[4682]: E1210 12:03:33.384538 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:03:43 crc kubenswrapper[4682]: E1210 12:03:43.383091 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:03:45 crc kubenswrapper[4682]: I1210 12:03:45.381042 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:03:46 crc kubenswrapper[4682]: E1210 12:03:46.389262 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:03:46 crc kubenswrapper[4682]: I1210 12:03:46.487920 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"08b0f21e5f85b40aa7cb0d289080af87abc18c46b8cd43c7b4bd2e5e8b33365f"} Dec 10 12:03:58 crc kubenswrapper[4682]: E1210 12:03:58.382654 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:04:01 crc kubenswrapper[4682]: E1210 12:04:01.382789 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:04:13 crc kubenswrapper[4682]: E1210 12:04:13.382916 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:04:14 crc kubenswrapper[4682]: E1210 12:04:14.382965 4682 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:04:24 crc kubenswrapper[4682]: E1210 12:04:24.384613 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:04:26 crc kubenswrapper[4682]: E1210 12:04:26.383002 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:04:39 crc kubenswrapper[4682]: E1210 12:04:39.383377 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:04:39 crc kubenswrapper[4682]: E1210 12:04:39.383494 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:04:51 crc kubenswrapper[4682]: E1210 12:04:51.385412 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:04:54 crc kubenswrapper[4682]: E1210 12:04:54.383654 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:05:02 crc kubenswrapper[4682]: E1210 12:05:02.382393 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:05:07 crc kubenswrapper[4682]: E1210 12:05:07.385149 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.037567 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w"] Dec 10 12:05:09 crc kubenswrapper[4682]: E1210 12:05:09.038402 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da98ce99-0e03-465a-9d86-0e3cb7fbcb59" containerName="keystone-cron" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.038422 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="da98ce99-0e03-465a-9d86-0e3cb7fbcb59" containerName="keystone-cron" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.038728 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="da98ce99-0e03-465a-9d86-0e3cb7fbcb59" containerName="keystone-cron" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.039791 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.045317 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.047602 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.047650 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.047916 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-tln2g" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.057272 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w"] Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.171891 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zklg\" (UniqueName: \"kubernetes.io/projected/11a38c1a-3a98-4c77-82ff-caf76c15fefc-kube-api-access-2zklg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w\" (UID: \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.172123 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/11a38c1a-3a98-4c77-82ff-caf76c15fefc-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w\" (UID: \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.172744 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/11a38c1a-3a98-4c77-82ff-caf76c15fefc-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w\" (UID: \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.275644 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/11a38c1a-3a98-4c77-82ff-caf76c15fefc-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w\" (UID: \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.276440 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/11a38c1a-3a98-4c77-82ff-caf76c15fefc-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w\" (UID: \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.276598 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zklg\" (UniqueName: \"kubernetes.io/projected/11a38c1a-3a98-4c77-82ff-caf76c15fefc-kube-api-access-2zklg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w\" (UID: \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.283716 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/11a38c1a-3a98-4c77-82ff-caf76c15fefc-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w\" (UID: \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.285914 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/11a38c1a-3a98-4c77-82ff-caf76c15fefc-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w\" (UID: \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.293577 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zklg\" (UniqueName: \"kubernetes.io/projected/11a38c1a-3a98-4c77-82ff-caf76c15fefc-kube-api-access-2zklg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w\" (UID: \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.372168 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" Dec 10 12:05:09 crc kubenswrapper[4682]: I1210 12:05:09.998803 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w"] Dec 10 12:05:10 crc kubenswrapper[4682]: I1210 12:05:10.349802 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" event={"ID":"11a38c1a-3a98-4c77-82ff-caf76c15fefc","Type":"ContainerStarted","Data":"676ea4a1b1c584b7e14e93a778abf5e47a4e8042a62f2ba154a82958c248caad"} Dec 10 12:05:11 crc kubenswrapper[4682]: I1210 12:05:11.358750 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" event={"ID":"11a38c1a-3a98-4c77-82ff-caf76c15fefc","Type":"ContainerStarted","Data":"a6a656420a2248541a05ff3a17035afb06addabffac910809939c19800346bf5"} Dec 10 12:05:11 crc kubenswrapper[4682]: I1210 12:05:11.380223 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" podStartSLOduration=1.799205045 podStartE2EDuration="2.380203477s" podCreationTimestamp="2025-12-10 12:05:09 +0000 UTC" firstStartedPulling="2025-12-10 12:05:10.005951309 +0000 UTC m=+4790.326162059" lastFinishedPulling="2025-12-10 12:05:10.586949741 +0000 UTC m=+4790.907160491" observedRunningTime="2025-12-10 12:05:11.37744277 +0000 UTC m=+4791.697653530" watchObservedRunningTime="2025-12-10 12:05:11.380203477 +0000 UTC m=+4791.700414227" Dec 10 12:05:13 crc kubenswrapper[4682]: E1210 12:05:13.384639 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:05:19 crc kubenswrapper[4682]: E1210 12:05:19.383304 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:05:27 crc kubenswrapper[4682]: E1210 12:05:27.384604 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:05:32 crc kubenswrapper[4682]: E1210 12:05:32.384944 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:05:38 crc kubenswrapper[4682]: I1210 12:05:38.343595 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8xgm6"] Dec 10 12:05:38 crc kubenswrapper[4682]: I1210 12:05:38.346911 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:38 crc kubenswrapper[4682]: I1210 12:05:38.352437 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8xgm6"] Dec 10 12:05:38 crc kubenswrapper[4682]: I1210 12:05:38.433241 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fad22171-7f15-49a6-90ea-af7388f38ff3-utilities\") pod \"certified-operators-8xgm6\" (UID: \"fad22171-7f15-49a6-90ea-af7388f38ff3\") " pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:38 crc kubenswrapper[4682]: I1210 12:05:38.433307 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fad22171-7f15-49a6-90ea-af7388f38ff3-catalog-content\") pod \"certified-operators-8xgm6\" (UID: \"fad22171-7f15-49a6-90ea-af7388f38ff3\") " pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:38 crc kubenswrapper[4682]: I1210 12:05:38.433353 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lk8x\" (UniqueName: \"kubernetes.io/projected/fad22171-7f15-49a6-90ea-af7388f38ff3-kube-api-access-8lk8x\") pod \"certified-operators-8xgm6\" (UID: \"fad22171-7f15-49a6-90ea-af7388f38ff3\") " pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:38 crc kubenswrapper[4682]: I1210 12:05:38.535148 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lk8x\" (UniqueName: \"kubernetes.io/projected/fad22171-7f15-49a6-90ea-af7388f38ff3-kube-api-access-8lk8x\") pod \"certified-operators-8xgm6\" (UID: \"fad22171-7f15-49a6-90ea-af7388f38ff3\") " pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:38 crc kubenswrapper[4682]: I1210 12:05:38.535347 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fad22171-7f15-49a6-90ea-af7388f38ff3-utilities\") pod \"certified-operators-8xgm6\" (UID: \"fad22171-7f15-49a6-90ea-af7388f38ff3\") " pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:38 crc kubenswrapper[4682]: I1210 12:05:38.535377 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fad22171-7f15-49a6-90ea-af7388f38ff3-catalog-content\") pod \"certified-operators-8xgm6\" (UID: \"fad22171-7f15-49a6-90ea-af7388f38ff3\") " pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:38 crc kubenswrapper[4682]: I1210 12:05:38.535897 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fad22171-7f15-49a6-90ea-af7388f38ff3-catalog-content\") pod \"certified-operators-8xgm6\" (UID: \"fad22171-7f15-49a6-90ea-af7388f38ff3\") " pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:38 crc kubenswrapper[4682]: I1210 12:05:38.536002 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fad22171-7f15-49a6-90ea-af7388f38ff3-utilities\") pod \"certified-operators-8xgm6\" (UID: \"fad22171-7f15-49a6-90ea-af7388f38ff3\") " pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:38 crc kubenswrapper[4682]: I1210 12:05:38.553591 4682 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-8lk8x\" (UniqueName: \"kubernetes.io/projected/fad22171-7f15-49a6-90ea-af7388f38ff3-kube-api-access-8lk8x\") pod \"certified-operators-8xgm6\" (UID: \"fad22171-7f15-49a6-90ea-af7388f38ff3\") " pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:38 crc kubenswrapper[4682]: I1210 12:05:38.674730 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:39 crc kubenswrapper[4682]: I1210 12:05:39.207578 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8xgm6"] Dec 10 12:05:39 crc kubenswrapper[4682]: E1210 12:05:39.382946 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:05:39 crc kubenswrapper[4682]: I1210 12:05:39.636746 4682 generic.go:334] "Generic (PLEG): container finished" podID="fad22171-7f15-49a6-90ea-af7388f38ff3" containerID="995c0fd1d19b34342cf1695cdcbff404b909f8722f9a3e392d3d663407957b88" exitCode=0 Dec 10 12:05:39 crc kubenswrapper[4682]: I1210 12:05:39.636813 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8xgm6" event={"ID":"fad22171-7f15-49a6-90ea-af7388f38ff3","Type":"ContainerDied","Data":"995c0fd1d19b34342cf1695cdcbff404b909f8722f9a3e392d3d663407957b88"} Dec 10 12:05:39 crc kubenswrapper[4682]: I1210 12:05:39.636872 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8xgm6" event={"ID":"fad22171-7f15-49a6-90ea-af7388f38ff3","Type":"ContainerStarted","Data":"bd12369bd8c3e71b4a10b0ccc96c7c8575fafdaa438a2e9518ea0c2f244fc877"} Dec 10 12:05:41 crc kubenswrapper[4682]: I1210 12:05:41.655039 4682 generic.go:334] "Generic (PLEG): container finished" podID="fad22171-7f15-49a6-90ea-af7388f38ff3" containerID="133ff575bc630a3e9fcad7881469d4d333ddb27f71511b19006c11c0c9f2420d" exitCode=0 Dec 10 12:05:41 crc kubenswrapper[4682]: I1210 12:05:41.655136 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8xgm6" event={"ID":"fad22171-7f15-49a6-90ea-af7388f38ff3","Type":"ContainerDied","Data":"133ff575bc630a3e9fcad7881469d4d333ddb27f71511b19006c11c0c9f2420d"} Dec 10 12:05:45 crc kubenswrapper[4682]: I1210 12:05:45.696479 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8xgm6" event={"ID":"fad22171-7f15-49a6-90ea-af7388f38ff3","Type":"ContainerStarted","Data":"be346b0e958b24fd3afc88e8f776ab4506f886ec10d4e256bb413003f27f1413"} Dec 10 12:05:45 crc kubenswrapper[4682]: I1210 12:05:45.748128 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8xgm6" podStartSLOduration=3.27702935 podStartE2EDuration="7.7481072s" podCreationTimestamp="2025-12-10 12:05:38 +0000 UTC" firstStartedPulling="2025-12-10 12:05:39.640210183 +0000 UTC m=+4819.960420933" lastFinishedPulling="2025-12-10 12:05:44.111288033 +0000 UTC m=+4824.431498783" observedRunningTime="2025-12-10 12:05:45.730571452 +0000 UTC m=+4826.050782202" watchObservedRunningTime="2025-12-10 12:05:45.7481072 +0000 UTC m=+4826.068317950" Dec 10 12:05:47 crc kubenswrapper[4682]: 
E1210 12:05:47.382314 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.484733 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ccpbv"] Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.487055 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.544017 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ccpbv"] Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.628456 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxb8r\" (UniqueName: \"kubernetes.io/projected/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-kube-api-access-mxb8r\") pod \"redhat-marketplace-ccpbv\" (UID: \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\") " pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.628609 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-catalog-content\") pod \"redhat-marketplace-ccpbv\" (UID: \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\") " pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.628713 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-utilities\") pod \"redhat-marketplace-ccpbv\" (UID: \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\") " pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.675667 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.676004 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.731116 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxb8r\" (UniqueName: \"kubernetes.io/projected/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-kube-api-access-mxb8r\") pod \"redhat-marketplace-ccpbv\" (UID: \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\") " pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.731166 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-catalog-content\") pod \"redhat-marketplace-ccpbv\" (UID: \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\") " pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.731219 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-utilities\") pod \"redhat-marketplace-ccpbv\" (UID: \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\") " pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.732057 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-utilities\") pod \"redhat-marketplace-ccpbv\" (UID: \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\") " pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.732056 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-catalog-content\") pod \"redhat-marketplace-ccpbv\" (UID: \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\") " pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.732935 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.753145 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxb8r\" (UniqueName: \"kubernetes.io/projected/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-kube-api-access-mxb8r\") pod \"redhat-marketplace-ccpbv\" (UID: \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\") " pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:48 crc kubenswrapper[4682]: I1210 12:05:48.856783 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:49 crc kubenswrapper[4682]: I1210 12:05:49.364790 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ccpbv"] Dec 10 12:05:49 crc kubenswrapper[4682]: I1210 12:05:49.741232 4682 generic.go:334] "Generic (PLEG): container finished" podID="7f5d87ee-05f5-4d4d-9f07-6cca84567daf" containerID="4826dbdecd61846d93b26143c22c9e3223622cf2ed070e90e11574fc9ac0335b" exitCode=0 Dec 10 12:05:49 crc kubenswrapper[4682]: I1210 12:05:49.741292 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ccpbv" event={"ID":"7f5d87ee-05f5-4d4d-9f07-6cca84567daf","Type":"ContainerDied","Data":"4826dbdecd61846d93b26143c22c9e3223622cf2ed070e90e11574fc9ac0335b"} Dec 10 12:05:49 crc kubenswrapper[4682]: I1210 12:05:49.741516 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ccpbv" event={"ID":"7f5d87ee-05f5-4d4d-9f07-6cca84567daf","Type":"ContainerStarted","Data":"4c3e3f8226c1ef15d354d07b16d04277e3cb05ee0d457e2ce097b33892159233"} Dec 10 12:05:51 crc kubenswrapper[4682]: I1210 12:05:51.759851 4682 generic.go:334] "Generic (PLEG): container finished" podID="7f5d87ee-05f5-4d4d-9f07-6cca84567daf" containerID="865c59bd9d7fb43a203e70e6297bb6610c73d9eb4f9fd4eb2440e68289ac33b3" exitCode=0 Dec 10 12:05:51 crc kubenswrapper[4682]: I1210 12:05:51.759966 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ccpbv" event={"ID":"7f5d87ee-05f5-4d4d-9f07-6cca84567daf","Type":"ContainerDied","Data":"865c59bd9d7fb43a203e70e6297bb6610c73d9eb4f9fd4eb2440e68289ac33b3"} Dec 10 12:05:52 crc kubenswrapper[4682]: E1210 12:05:52.382797 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:05:52 crc kubenswrapper[4682]: I1210 12:05:52.770636 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ccpbv" event={"ID":"7f5d87ee-05f5-4d4d-9f07-6cca84567daf","Type":"ContainerStarted","Data":"86b9462869c386057aeed9893cef5648f20f8975cc397f6167167947b1ca7f53"} Dec 10 12:05:52 crc kubenswrapper[4682]: I1210 12:05:52.796371 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ccpbv" podStartSLOduration=2.100886993 podStartE2EDuration="4.796346815s" podCreationTimestamp="2025-12-10 12:05:48 +0000 UTC" firstStartedPulling="2025-12-10 12:05:49.743331435 +0000 UTC m=+4830.063542185" lastFinishedPulling="2025-12-10 12:05:52.438791247 +0000 UTC m=+4832.759002007" observedRunningTime="2025-12-10 12:05:52.784153712 +0000 UTC m=+4833.104364472" watchObservedRunningTime="2025-12-10 12:05:52.796346815 +0000 UTC m=+4833.116557575" Dec 10 12:05:58 crc kubenswrapper[4682]: I1210 12:05:58.724321 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:58 crc kubenswrapper[4682]: I1210 12:05:58.784799 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8xgm6"] Dec 10 12:05:58 crc kubenswrapper[4682]: I1210 12:05:58.843301 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8xgm6" podUID="fad22171-7f15-49a6-90ea-af7388f38ff3" containerName="registry-server" containerID="cri-o://be346b0e958b24fd3afc88e8f776ab4506f886ec10d4e256bb413003f27f1413" gracePeriod=2 Dec 10 12:05:58 crc kubenswrapper[4682]: I1210 12:05:58.857341 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:58 crc kubenswrapper[4682]: I1210 12:05:58.857387 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:58 crc kubenswrapper[4682]: I1210 12:05:58.912382 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.320998 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.465398 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fad22171-7f15-49a6-90ea-af7388f38ff3-utilities\") pod \"fad22171-7f15-49a6-90ea-af7388f38ff3\" (UID: \"fad22171-7f15-49a6-90ea-af7388f38ff3\") " Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.465686 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fad22171-7f15-49a6-90ea-af7388f38ff3-catalog-content\") pod \"fad22171-7f15-49a6-90ea-af7388f38ff3\" (UID: \"fad22171-7f15-49a6-90ea-af7388f38ff3\") " Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.465728 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8lk8x\" (UniqueName: \"kubernetes.io/projected/fad22171-7f15-49a6-90ea-af7388f38ff3-kube-api-access-8lk8x\") pod \"fad22171-7f15-49a6-90ea-af7388f38ff3\" (UID: \"fad22171-7f15-49a6-90ea-af7388f38ff3\") " Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.467562 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fad22171-7f15-49a6-90ea-af7388f38ff3-utilities" (OuterVolumeSpecName: "utilities") pod "fad22171-7f15-49a6-90ea-af7388f38ff3" (UID: "fad22171-7f15-49a6-90ea-af7388f38ff3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.475335 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fad22171-7f15-49a6-90ea-af7388f38ff3-kube-api-access-8lk8x" (OuterVolumeSpecName: "kube-api-access-8lk8x") pod "fad22171-7f15-49a6-90ea-af7388f38ff3" (UID: "fad22171-7f15-49a6-90ea-af7388f38ff3"). InnerVolumeSpecName "kube-api-access-8lk8x". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.531152 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fad22171-7f15-49a6-90ea-af7388f38ff3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fad22171-7f15-49a6-90ea-af7388f38ff3" (UID: "fad22171-7f15-49a6-90ea-af7388f38ff3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.568270 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fad22171-7f15-49a6-90ea-af7388f38ff3-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.568308 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fad22171-7f15-49a6-90ea-af7388f38ff3-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.568323 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8lk8x\" (UniqueName: \"kubernetes.io/projected/fad22171-7f15-49a6-90ea-af7388f38ff3-kube-api-access-8lk8x\") on node \"crc\" DevicePath \"\"" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.858676 4682 generic.go:334] "Generic (PLEG): container finished" podID="fad22171-7f15-49a6-90ea-af7388f38ff3" containerID="be346b0e958b24fd3afc88e8f776ab4506f886ec10d4e256bb413003f27f1413" exitCode=0 Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.858720 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8xgm6" event={"ID":"fad22171-7f15-49a6-90ea-af7388f38ff3","Type":"ContainerDied","Data":"be346b0e958b24fd3afc88e8f776ab4506f886ec10d4e256bb413003f27f1413"} Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.858795 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8xgm6" event={"ID":"fad22171-7f15-49a6-90ea-af7388f38ff3","Type":"ContainerDied","Data":"bd12369bd8c3e71b4a10b0ccc96c7c8575fafdaa438a2e9518ea0c2f244fc877"} Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.858818 4682 scope.go:117] "RemoveContainer" containerID="be346b0e958b24fd3afc88e8f776ab4506f886ec10d4e256bb413003f27f1413" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.859672 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8xgm6" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.902175 4682 scope.go:117] "RemoveContainer" containerID="133ff575bc630a3e9fcad7881469d4d333ddb27f71511b19006c11c0c9f2420d" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.924265 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8xgm6"] Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.935378 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8xgm6"] Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.946427 4682 scope.go:117] "RemoveContainer" containerID="995c0fd1d19b34342cf1695cdcbff404b909f8722f9a3e392d3d663407957b88" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.953026 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.985940 4682 scope.go:117] "RemoveContainer" containerID="be346b0e958b24fd3afc88e8f776ab4506f886ec10d4e256bb413003f27f1413" Dec 10 12:05:59 crc kubenswrapper[4682]: E1210 12:05:59.986353 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be346b0e958b24fd3afc88e8f776ab4506f886ec10d4e256bb413003f27f1413\": container with ID starting with be346b0e958b24fd3afc88e8f776ab4506f886ec10d4e256bb413003f27f1413 not found: ID does not exist" containerID="be346b0e958b24fd3afc88e8f776ab4506f886ec10d4e256bb413003f27f1413" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.986384 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be346b0e958b24fd3afc88e8f776ab4506f886ec10d4e256bb413003f27f1413"} err="failed to get container status \"be346b0e958b24fd3afc88e8f776ab4506f886ec10d4e256bb413003f27f1413\": rpc error: code = NotFound desc = could not find container \"be346b0e958b24fd3afc88e8f776ab4506f886ec10d4e256bb413003f27f1413\": container with ID starting with be346b0e958b24fd3afc88e8f776ab4506f886ec10d4e256bb413003f27f1413 not found: ID does not exist" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.986404 4682 scope.go:117] "RemoveContainer" containerID="133ff575bc630a3e9fcad7881469d4d333ddb27f71511b19006c11c0c9f2420d" Dec 10 12:05:59 crc kubenswrapper[4682]: E1210 12:05:59.986792 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"133ff575bc630a3e9fcad7881469d4d333ddb27f71511b19006c11c0c9f2420d\": container with ID starting with 133ff575bc630a3e9fcad7881469d4d333ddb27f71511b19006c11c0c9f2420d not found: ID does not exist" containerID="133ff575bc630a3e9fcad7881469d4d333ddb27f71511b19006c11c0c9f2420d" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.986824 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"133ff575bc630a3e9fcad7881469d4d333ddb27f71511b19006c11c0c9f2420d"} err="failed to get container status \"133ff575bc630a3e9fcad7881469d4d333ddb27f71511b19006c11c0c9f2420d\": rpc error: code = NotFound desc = could not find container \"133ff575bc630a3e9fcad7881469d4d333ddb27f71511b19006c11c0c9f2420d\": container with ID starting with 133ff575bc630a3e9fcad7881469d4d333ddb27f71511b19006c11c0c9f2420d not found: ID does not exist" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.986843 4682 scope.go:117] "RemoveContainer" 
containerID="995c0fd1d19b34342cf1695cdcbff404b909f8722f9a3e392d3d663407957b88" Dec 10 12:05:59 crc kubenswrapper[4682]: E1210 12:05:59.987074 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"995c0fd1d19b34342cf1695cdcbff404b909f8722f9a3e392d3d663407957b88\": container with ID starting with 995c0fd1d19b34342cf1695cdcbff404b909f8722f9a3e392d3d663407957b88 not found: ID does not exist" containerID="995c0fd1d19b34342cf1695cdcbff404b909f8722f9a3e392d3d663407957b88" Dec 10 12:05:59 crc kubenswrapper[4682]: I1210 12:05:59.987135 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"995c0fd1d19b34342cf1695cdcbff404b909f8722f9a3e392d3d663407957b88"} err="failed to get container status \"995c0fd1d19b34342cf1695cdcbff404b909f8722f9a3e392d3d663407957b88\": rpc error: code = NotFound desc = could not find container \"995c0fd1d19b34342cf1695cdcbff404b909f8722f9a3e392d3d663407957b88\": container with ID starting with 995c0fd1d19b34342cf1695cdcbff404b909f8722f9a3e392d3d663407957b88 not found: ID does not exist" Dec 10 12:06:00 crc kubenswrapper[4682]: I1210 12:06:00.395282 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fad22171-7f15-49a6-90ea-af7388f38ff3" path="/var/lib/kubelet/pods/fad22171-7f15-49a6-90ea-af7388f38ff3/volumes" Dec 10 12:06:01 crc kubenswrapper[4682]: I1210 12:06:01.161071 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ccpbv"] Dec 10 12:06:01 crc kubenswrapper[4682]: E1210 12:06:01.382216 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:06:01 crc kubenswrapper[4682]: I1210 12:06:01.882008 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ccpbv" podUID="7f5d87ee-05f5-4d4d-9f07-6cca84567daf" containerName="registry-server" containerID="cri-o://86b9462869c386057aeed9893cef5648f20f8975cc397f6167167947b1ca7f53" gracePeriod=2 Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.422274 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.437314 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-utilities\") pod \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\" (UID: \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\") " Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.437408 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-catalog-content\") pod \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\" (UID: \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\") " Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.437690 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mxb8r\" (UniqueName: \"kubernetes.io/projected/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-kube-api-access-mxb8r\") pod \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\" (UID: \"7f5d87ee-05f5-4d4d-9f07-6cca84567daf\") " Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.438618 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-utilities" (OuterVolumeSpecName: "utilities") pod "7f5d87ee-05f5-4d4d-9f07-6cca84567daf" (UID: "7f5d87ee-05f5-4d4d-9f07-6cca84567daf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.441853 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.443572 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-kube-api-access-mxb8r" (OuterVolumeSpecName: "kube-api-access-mxb8r") pod "7f5d87ee-05f5-4d4d-9f07-6cca84567daf" (UID: "7f5d87ee-05f5-4d4d-9f07-6cca84567daf"). InnerVolumeSpecName "kube-api-access-mxb8r". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.464317 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7f5d87ee-05f5-4d4d-9f07-6cca84567daf" (UID: "7f5d87ee-05f5-4d4d-9f07-6cca84567daf"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.543548 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.543583 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mxb8r\" (UniqueName: \"kubernetes.io/projected/7f5d87ee-05f5-4d4d-9f07-6cca84567daf-kube-api-access-mxb8r\") on node \"crc\" DevicePath \"\"" Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.894953 4682 generic.go:334] "Generic (PLEG): container finished" podID="7f5d87ee-05f5-4d4d-9f07-6cca84567daf" containerID="86b9462869c386057aeed9893cef5648f20f8975cc397f6167167947b1ca7f53" exitCode=0 Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.895003 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ccpbv" event={"ID":"7f5d87ee-05f5-4d4d-9f07-6cca84567daf","Type":"ContainerDied","Data":"86b9462869c386057aeed9893cef5648f20f8975cc397f6167167947b1ca7f53"} Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.895013 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ccpbv" Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.895036 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ccpbv" event={"ID":"7f5d87ee-05f5-4d4d-9f07-6cca84567daf","Type":"ContainerDied","Data":"4c3e3f8226c1ef15d354d07b16d04277e3cb05ee0d457e2ce097b33892159233"} Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.895056 4682 scope.go:117] "RemoveContainer" containerID="86b9462869c386057aeed9893cef5648f20f8975cc397f6167167947b1ca7f53" Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.934024 4682 scope.go:117] "RemoveContainer" containerID="865c59bd9d7fb43a203e70e6297bb6610c73d9eb4f9fd4eb2440e68289ac33b3" Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.940469 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ccpbv"] Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.954297 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ccpbv"] Dec 10 12:06:02 crc kubenswrapper[4682]: I1210 12:06:02.961506 4682 scope.go:117] "RemoveContainer" containerID="4826dbdecd61846d93b26143c22c9e3223622cf2ed070e90e11574fc9ac0335b" Dec 10 12:06:03 crc kubenswrapper[4682]: I1210 12:06:03.012500 4682 scope.go:117] "RemoveContainer" containerID="86b9462869c386057aeed9893cef5648f20f8975cc397f6167167947b1ca7f53" Dec 10 12:06:03 crc kubenswrapper[4682]: E1210 12:06:03.013375 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86b9462869c386057aeed9893cef5648f20f8975cc397f6167167947b1ca7f53\": container with ID starting with 86b9462869c386057aeed9893cef5648f20f8975cc397f6167167947b1ca7f53 not found: ID does not exist" containerID="86b9462869c386057aeed9893cef5648f20f8975cc397f6167167947b1ca7f53" Dec 10 12:06:03 crc kubenswrapper[4682]: I1210 12:06:03.013406 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86b9462869c386057aeed9893cef5648f20f8975cc397f6167167947b1ca7f53"} err="failed to get container status 
\"86b9462869c386057aeed9893cef5648f20f8975cc397f6167167947b1ca7f53\": rpc error: code = NotFound desc = could not find container \"86b9462869c386057aeed9893cef5648f20f8975cc397f6167167947b1ca7f53\": container with ID starting with 86b9462869c386057aeed9893cef5648f20f8975cc397f6167167947b1ca7f53 not found: ID does not exist" Dec 10 12:06:03 crc kubenswrapper[4682]: I1210 12:06:03.013430 4682 scope.go:117] "RemoveContainer" containerID="865c59bd9d7fb43a203e70e6297bb6610c73d9eb4f9fd4eb2440e68289ac33b3" Dec 10 12:06:03 crc kubenswrapper[4682]: E1210 12:06:03.013742 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"865c59bd9d7fb43a203e70e6297bb6610c73d9eb4f9fd4eb2440e68289ac33b3\": container with ID starting with 865c59bd9d7fb43a203e70e6297bb6610c73d9eb4f9fd4eb2440e68289ac33b3 not found: ID does not exist" containerID="865c59bd9d7fb43a203e70e6297bb6610c73d9eb4f9fd4eb2440e68289ac33b3" Dec 10 12:06:03 crc kubenswrapper[4682]: I1210 12:06:03.013766 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"865c59bd9d7fb43a203e70e6297bb6610c73d9eb4f9fd4eb2440e68289ac33b3"} err="failed to get container status \"865c59bd9d7fb43a203e70e6297bb6610c73d9eb4f9fd4eb2440e68289ac33b3\": rpc error: code = NotFound desc = could not find container \"865c59bd9d7fb43a203e70e6297bb6610c73d9eb4f9fd4eb2440e68289ac33b3\": container with ID starting with 865c59bd9d7fb43a203e70e6297bb6610c73d9eb4f9fd4eb2440e68289ac33b3 not found: ID does not exist" Dec 10 12:06:03 crc kubenswrapper[4682]: I1210 12:06:03.013779 4682 scope.go:117] "RemoveContainer" containerID="4826dbdecd61846d93b26143c22c9e3223622cf2ed070e90e11574fc9ac0335b" Dec 10 12:06:03 crc kubenswrapper[4682]: E1210 12:06:03.014166 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4826dbdecd61846d93b26143c22c9e3223622cf2ed070e90e11574fc9ac0335b\": container with ID starting with 4826dbdecd61846d93b26143c22c9e3223622cf2ed070e90e11574fc9ac0335b not found: ID does not exist" containerID="4826dbdecd61846d93b26143c22c9e3223622cf2ed070e90e11574fc9ac0335b" Dec 10 12:06:03 crc kubenswrapper[4682]: I1210 12:06:03.014219 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4826dbdecd61846d93b26143c22c9e3223622cf2ed070e90e11574fc9ac0335b"} err="failed to get container status \"4826dbdecd61846d93b26143c22c9e3223622cf2ed070e90e11574fc9ac0335b\": rpc error: code = NotFound desc = could not find container \"4826dbdecd61846d93b26143c22c9e3223622cf2ed070e90e11574fc9ac0335b\": container with ID starting with 4826dbdecd61846d93b26143c22c9e3223622cf2ed070e90e11574fc9ac0335b not found: ID does not exist" Dec 10 12:06:03 crc kubenswrapper[4682]: E1210 12:06:03.382780 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:06:04 crc kubenswrapper[4682]: I1210 12:06:04.396663 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f5d87ee-05f5-4d4d-9f07-6cca84567daf" path="/var/lib/kubelet/pods/7f5d87ee-05f5-4d4d-9f07-6cca84567daf/volumes" Dec 10 12:06:06 crc kubenswrapper[4682]: I1210 12:06:06.479132 4682 patch_prober.go:28] 
interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:06:06 crc kubenswrapper[4682]: I1210 12:06:06.479813 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:06:14 crc kubenswrapper[4682]: E1210 12:06:14.386353 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:06:16 crc kubenswrapper[4682]: E1210 12:06:16.384056 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:06:26 crc kubenswrapper[4682]: E1210 12:06:26.382909 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:06:29 crc kubenswrapper[4682]: E1210 12:06:29.385748 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:06:36 crc kubenswrapper[4682]: I1210 12:06:36.478440 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:06:36 crc kubenswrapper[4682]: I1210 12:06:36.479127 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:06:39 crc kubenswrapper[4682]: E1210 12:06:39.382991 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:06:40 crc kubenswrapper[4682]: E1210 12:06:40.393314 4682 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:06:53 crc kubenswrapper[4682]: E1210 12:06:53.384057 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:06:54 crc kubenswrapper[4682]: E1210 12:06:54.384114 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:07:06 crc kubenswrapper[4682]: I1210 12:07:06.478289 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:07:06 crc kubenswrapper[4682]: I1210 12:07:06.478873 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:07:06 crc kubenswrapper[4682]: I1210 12:07:06.478930 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 12:07:06 crc kubenswrapper[4682]: I1210 12:07:06.479728 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"08b0f21e5f85b40aa7cb0d289080af87abc18c46b8cd43c7b4bd2e5e8b33365f"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 12:07:06 crc kubenswrapper[4682]: I1210 12:07:06.479781 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://08b0f21e5f85b40aa7cb0d289080af87abc18c46b8cd43c7b4bd2e5e8b33365f" gracePeriod=600 Dec 10 12:07:06 crc kubenswrapper[4682]: I1210 12:07:06.689076 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="08b0f21e5f85b40aa7cb0d289080af87abc18c46b8cd43c7b4bd2e5e8b33365f" exitCode=0 Dec 10 12:07:06 crc kubenswrapper[4682]: I1210 12:07:06.689127 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"08b0f21e5f85b40aa7cb0d289080af87abc18c46b8cd43c7b4bd2e5e8b33365f"} Dec 10 12:07:06 crc kubenswrapper[4682]: I1210 
12:07:06.689166 4682 scope.go:117] "RemoveContainer" containerID="3299fa6b3a703877f1c0f21e75ceba5bfd983a703171495ff394efb3bbc72f49" Dec 10 12:07:07 crc kubenswrapper[4682]: E1210 12:07:07.393217 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:07:07 crc kubenswrapper[4682]: I1210 12:07:07.711326 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20"} Dec 10 12:07:08 crc kubenswrapper[4682]: E1210 12:07:08.383502 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.165064 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vzbz4"] Dec 10 12:07:11 crc kubenswrapper[4682]: E1210 12:07:11.165935 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fad22171-7f15-49a6-90ea-af7388f38ff3" containerName="registry-server" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.165953 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad22171-7f15-49a6-90ea-af7388f38ff3" containerName="registry-server" Dec 10 12:07:11 crc kubenswrapper[4682]: E1210 12:07:11.165972 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f5d87ee-05f5-4d4d-9f07-6cca84567daf" containerName="extract-utilities" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.165979 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f5d87ee-05f5-4d4d-9f07-6cca84567daf" containerName="extract-utilities" Dec 10 12:07:11 crc kubenswrapper[4682]: E1210 12:07:11.166007 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fad22171-7f15-49a6-90ea-af7388f38ff3" containerName="extract-utilities" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.166015 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad22171-7f15-49a6-90ea-af7388f38ff3" containerName="extract-utilities" Dec 10 12:07:11 crc kubenswrapper[4682]: E1210 12:07:11.166029 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f5d87ee-05f5-4d4d-9f07-6cca84567daf" containerName="extract-content" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.166036 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f5d87ee-05f5-4d4d-9f07-6cca84567daf" containerName="extract-content" Dec 10 12:07:11 crc kubenswrapper[4682]: E1210 12:07:11.166050 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f5d87ee-05f5-4d4d-9f07-6cca84567daf" containerName="registry-server" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.166057 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f5d87ee-05f5-4d4d-9f07-6cca84567daf" containerName="registry-server" Dec 10 12:07:11 crc kubenswrapper[4682]: E1210 12:07:11.166092 4682 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="fad22171-7f15-49a6-90ea-af7388f38ff3" containerName="extract-content" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.166098 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad22171-7f15-49a6-90ea-af7388f38ff3" containerName="extract-content" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.166298 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="fad22171-7f15-49a6-90ea-af7388f38ff3" containerName="registry-server" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.166332 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f5d87ee-05f5-4d4d-9f07-6cca84567daf" containerName="registry-server" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.168163 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.182015 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vzbz4"] Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.207541 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f93d815e-bf20-4ca5-9ef3-cb2de2293682-catalog-content\") pod \"redhat-operators-vzbz4\" (UID: \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\") " pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.207589 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfd9c\" (UniqueName: \"kubernetes.io/projected/f93d815e-bf20-4ca5-9ef3-cb2de2293682-kube-api-access-hfd9c\") pod \"redhat-operators-vzbz4\" (UID: \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\") " pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.207651 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f93d815e-bf20-4ca5-9ef3-cb2de2293682-utilities\") pod \"redhat-operators-vzbz4\" (UID: \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\") " pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.309768 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f93d815e-bf20-4ca5-9ef3-cb2de2293682-catalog-content\") pod \"redhat-operators-vzbz4\" (UID: \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\") " pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.309831 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfd9c\" (UniqueName: \"kubernetes.io/projected/f93d815e-bf20-4ca5-9ef3-cb2de2293682-kube-api-access-hfd9c\") pod \"redhat-operators-vzbz4\" (UID: \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\") " pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.309911 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f93d815e-bf20-4ca5-9ef3-cb2de2293682-utilities\") pod \"redhat-operators-vzbz4\" (UID: \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\") " pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.310383 4682 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f93d815e-bf20-4ca5-9ef3-cb2de2293682-catalog-content\") pod \"redhat-operators-vzbz4\" (UID: \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\") " pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.310393 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f93d815e-bf20-4ca5-9ef3-cb2de2293682-utilities\") pod \"redhat-operators-vzbz4\" (UID: \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\") " pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.336527 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfd9c\" (UniqueName: \"kubernetes.io/projected/f93d815e-bf20-4ca5-9ef3-cb2de2293682-kube-api-access-hfd9c\") pod \"redhat-operators-vzbz4\" (UID: \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\") " pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.493506 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:11 crc kubenswrapper[4682]: I1210 12:07:11.982350 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vzbz4"] Dec 10 12:07:12 crc kubenswrapper[4682]: I1210 12:07:12.759647 4682 generic.go:334] "Generic (PLEG): container finished" podID="f93d815e-bf20-4ca5-9ef3-cb2de2293682" containerID="ac938ad9d8802426d20e171511fa9f864b31a50b58816f99563d941e1cb959a7" exitCode=0 Dec 10 12:07:12 crc kubenswrapper[4682]: I1210 12:07:12.760159 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzbz4" event={"ID":"f93d815e-bf20-4ca5-9ef3-cb2de2293682","Type":"ContainerDied","Data":"ac938ad9d8802426d20e171511fa9f864b31a50b58816f99563d941e1cb959a7"} Dec 10 12:07:12 crc kubenswrapper[4682]: I1210 12:07:12.760188 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzbz4" event={"ID":"f93d815e-bf20-4ca5-9ef3-cb2de2293682","Type":"ContainerStarted","Data":"db1f1e1686b87dde198eadd437ff907376bb44e7498c6698a74fb6cac7796e5e"} Dec 10 12:07:14 crc kubenswrapper[4682]: I1210 12:07:14.782752 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzbz4" event={"ID":"f93d815e-bf20-4ca5-9ef3-cb2de2293682","Type":"ContainerStarted","Data":"d48a4468ca028f2512d99db7f059a86f635fc07a4ff544838d208e378d0b591c"} Dec 10 12:07:15 crc kubenswrapper[4682]: I1210 12:07:15.792267 4682 generic.go:334] "Generic (PLEG): container finished" podID="f93d815e-bf20-4ca5-9ef3-cb2de2293682" containerID="d48a4468ca028f2512d99db7f059a86f635fc07a4ff544838d208e378d0b591c" exitCode=0 Dec 10 12:07:15 crc kubenswrapper[4682]: I1210 12:07:15.792418 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzbz4" event={"ID":"f93d815e-bf20-4ca5-9ef3-cb2de2293682","Type":"ContainerDied","Data":"d48a4468ca028f2512d99db7f059a86f635fc07a4ff544838d208e378d0b591c"} Dec 10 12:07:19 crc kubenswrapper[4682]: I1210 12:07:19.829447 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzbz4" event={"ID":"f93d815e-bf20-4ca5-9ef3-cb2de2293682","Type":"ContainerStarted","Data":"572f3679a28aadf274d8968c660a9e8ce13dc9f2b21e24f70d417eb86374922e"} 
Dec 10 12:07:21 crc kubenswrapper[4682]: I1210 12:07:21.493850 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:21 crc kubenswrapper[4682]: I1210 12:07:21.494260 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:22 crc kubenswrapper[4682]: E1210 12:07:22.383389 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:07:22 crc kubenswrapper[4682]: I1210 12:07:22.549496 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vzbz4" podUID="f93d815e-bf20-4ca5-9ef3-cb2de2293682" containerName="registry-server" probeResult="failure" output=< Dec 10 12:07:22 crc kubenswrapper[4682]: timeout: failed to connect service ":50051" within 1s Dec 10 12:07:22 crc kubenswrapper[4682]: > Dec 10 12:07:23 crc kubenswrapper[4682]: E1210 12:07:23.385341 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:07:31 crc kubenswrapper[4682]: I1210 12:07:31.541231 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:31 crc kubenswrapper[4682]: I1210 12:07:31.560849 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vzbz4" podStartSLOduration=14.255509212 podStartE2EDuration="20.560832999s" podCreationTimestamp="2025-12-10 12:07:11 +0000 UTC" firstStartedPulling="2025-12-10 12:07:12.762382504 +0000 UTC m=+4913.082593264" lastFinishedPulling="2025-12-10 12:07:19.067706291 +0000 UTC m=+4919.387917051" observedRunningTime="2025-12-10 12:07:19.846840574 +0000 UTC m=+4920.167051324" watchObservedRunningTime="2025-12-10 12:07:31.560832999 +0000 UTC m=+4931.881043749" Dec 10 12:07:31 crc kubenswrapper[4682]: I1210 12:07:31.600715 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:31 crc kubenswrapper[4682]: I1210 12:07:31.776074 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vzbz4"] Dec 10 12:07:32 crc kubenswrapper[4682]: I1210 12:07:32.957424 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vzbz4" podUID="f93d815e-bf20-4ca5-9ef3-cb2de2293682" containerName="registry-server" containerID="cri-o://572f3679a28aadf274d8968c660a9e8ce13dc9f2b21e24f70d417eb86374922e" gracePeriod=2 Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.500223 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.606207 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hfd9c\" (UniqueName: \"kubernetes.io/projected/f93d815e-bf20-4ca5-9ef3-cb2de2293682-kube-api-access-hfd9c\") pod \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\" (UID: \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\") " Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.606442 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f93d815e-bf20-4ca5-9ef3-cb2de2293682-catalog-content\") pod \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\" (UID: \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\") " Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.606565 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f93d815e-bf20-4ca5-9ef3-cb2de2293682-utilities\") pod \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\" (UID: \"f93d815e-bf20-4ca5-9ef3-cb2de2293682\") " Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.607860 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f93d815e-bf20-4ca5-9ef3-cb2de2293682-utilities" (OuterVolumeSpecName: "utilities") pod "f93d815e-bf20-4ca5-9ef3-cb2de2293682" (UID: "f93d815e-bf20-4ca5-9ef3-cb2de2293682"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.616794 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f93d815e-bf20-4ca5-9ef3-cb2de2293682-kube-api-access-hfd9c" (OuterVolumeSpecName: "kube-api-access-hfd9c") pod "f93d815e-bf20-4ca5-9ef3-cb2de2293682" (UID: "f93d815e-bf20-4ca5-9ef3-cb2de2293682"). InnerVolumeSpecName "kube-api-access-hfd9c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.709071 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f93d815e-bf20-4ca5-9ef3-cb2de2293682-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.709610 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hfd9c\" (UniqueName: \"kubernetes.io/projected/f93d815e-bf20-4ca5-9ef3-cb2de2293682-kube-api-access-hfd9c\") on node \"crc\" DevicePath \"\"" Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.721601 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f93d815e-bf20-4ca5-9ef3-cb2de2293682-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f93d815e-bf20-4ca5-9ef3-cb2de2293682" (UID: "f93d815e-bf20-4ca5-9ef3-cb2de2293682"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.811446 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f93d815e-bf20-4ca5-9ef3-cb2de2293682-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.969605 4682 generic.go:334] "Generic (PLEG): container finished" podID="f93d815e-bf20-4ca5-9ef3-cb2de2293682" containerID="572f3679a28aadf274d8968c660a9e8ce13dc9f2b21e24f70d417eb86374922e" exitCode=0 Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.969659 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzbz4" event={"ID":"f93d815e-bf20-4ca5-9ef3-cb2de2293682","Type":"ContainerDied","Data":"572f3679a28aadf274d8968c660a9e8ce13dc9f2b21e24f70d417eb86374922e"} Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.969693 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzbz4" event={"ID":"f93d815e-bf20-4ca5-9ef3-cb2de2293682","Type":"ContainerDied","Data":"db1f1e1686b87dde198eadd437ff907376bb44e7498c6698a74fb6cac7796e5e"} Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.969713 4682 scope.go:117] "RemoveContainer" containerID="572f3679a28aadf274d8968c660a9e8ce13dc9f2b21e24f70d417eb86374922e" Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.969753 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vzbz4" Dec 10 12:07:33 crc kubenswrapper[4682]: I1210 12:07:33.991616 4682 scope.go:117] "RemoveContainer" containerID="d48a4468ca028f2512d99db7f059a86f635fc07a4ff544838d208e378d0b591c" Dec 10 12:07:34 crc kubenswrapper[4682]: I1210 12:07:34.021020 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vzbz4"] Dec 10 12:07:34 crc kubenswrapper[4682]: I1210 12:07:34.029687 4682 scope.go:117] "RemoveContainer" containerID="ac938ad9d8802426d20e171511fa9f864b31a50b58816f99563d941e1cb959a7" Dec 10 12:07:34 crc kubenswrapper[4682]: I1210 12:07:34.032815 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vzbz4"] Dec 10 12:07:34 crc kubenswrapper[4682]: I1210 12:07:34.069658 4682 scope.go:117] "RemoveContainer" containerID="572f3679a28aadf274d8968c660a9e8ce13dc9f2b21e24f70d417eb86374922e" Dec 10 12:07:34 crc kubenswrapper[4682]: E1210 12:07:34.070187 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"572f3679a28aadf274d8968c660a9e8ce13dc9f2b21e24f70d417eb86374922e\": container with ID starting with 572f3679a28aadf274d8968c660a9e8ce13dc9f2b21e24f70d417eb86374922e not found: ID does not exist" containerID="572f3679a28aadf274d8968c660a9e8ce13dc9f2b21e24f70d417eb86374922e" Dec 10 12:07:34 crc kubenswrapper[4682]: I1210 12:07:34.070362 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"572f3679a28aadf274d8968c660a9e8ce13dc9f2b21e24f70d417eb86374922e"} err="failed to get container status \"572f3679a28aadf274d8968c660a9e8ce13dc9f2b21e24f70d417eb86374922e\": rpc error: code = NotFound desc = could not find container \"572f3679a28aadf274d8968c660a9e8ce13dc9f2b21e24f70d417eb86374922e\": container with ID starting with 572f3679a28aadf274d8968c660a9e8ce13dc9f2b21e24f70d417eb86374922e not found: ID does not exist" Dec 10 12:07:34 crc 
kubenswrapper[4682]: I1210 12:07:34.070461 4682 scope.go:117] "RemoveContainer" containerID="d48a4468ca028f2512d99db7f059a86f635fc07a4ff544838d208e378d0b591c" Dec 10 12:07:34 crc kubenswrapper[4682]: E1210 12:07:34.073802 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d48a4468ca028f2512d99db7f059a86f635fc07a4ff544838d208e378d0b591c\": container with ID starting with d48a4468ca028f2512d99db7f059a86f635fc07a4ff544838d208e378d0b591c not found: ID does not exist" containerID="d48a4468ca028f2512d99db7f059a86f635fc07a4ff544838d208e378d0b591c" Dec 10 12:07:34 crc kubenswrapper[4682]: I1210 12:07:34.073881 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d48a4468ca028f2512d99db7f059a86f635fc07a4ff544838d208e378d0b591c"} err="failed to get container status \"d48a4468ca028f2512d99db7f059a86f635fc07a4ff544838d208e378d0b591c\": rpc error: code = NotFound desc = could not find container \"d48a4468ca028f2512d99db7f059a86f635fc07a4ff544838d208e378d0b591c\": container with ID starting with d48a4468ca028f2512d99db7f059a86f635fc07a4ff544838d208e378d0b591c not found: ID does not exist" Dec 10 12:07:34 crc kubenswrapper[4682]: I1210 12:07:34.073966 4682 scope.go:117] "RemoveContainer" containerID="ac938ad9d8802426d20e171511fa9f864b31a50b58816f99563d941e1cb959a7" Dec 10 12:07:34 crc kubenswrapper[4682]: E1210 12:07:34.074269 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac938ad9d8802426d20e171511fa9f864b31a50b58816f99563d941e1cb959a7\": container with ID starting with ac938ad9d8802426d20e171511fa9f864b31a50b58816f99563d941e1cb959a7 not found: ID does not exist" containerID="ac938ad9d8802426d20e171511fa9f864b31a50b58816f99563d941e1cb959a7" Dec 10 12:07:34 crc kubenswrapper[4682]: I1210 12:07:34.074360 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac938ad9d8802426d20e171511fa9f864b31a50b58816f99563d941e1cb959a7"} err="failed to get container status \"ac938ad9d8802426d20e171511fa9f864b31a50b58816f99563d941e1cb959a7\": rpc error: code = NotFound desc = could not find container \"ac938ad9d8802426d20e171511fa9f864b31a50b58816f99563d941e1cb959a7\": container with ID starting with ac938ad9d8802426d20e171511fa9f864b31a50b58816f99563d941e1cb959a7 not found: ID does not exist" Dec 10 12:07:34 crc kubenswrapper[4682]: E1210 12:07:34.383879 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:07:34 crc kubenswrapper[4682]: E1210 12:07:34.383948 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:07:34 crc kubenswrapper[4682]: I1210 12:07:34.403753 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f93d815e-bf20-4ca5-9ef3-cb2de2293682" path="/var/lib/kubelet/pods/f93d815e-bf20-4ca5-9ef3-cb2de2293682/volumes" Dec 10 12:07:47 crc 
kubenswrapper[4682]: E1210 12:07:47.385374 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:07:49 crc kubenswrapper[4682]: E1210 12:07:49.385060 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:07:59 crc kubenswrapper[4682]: E1210 12:07:59.385824 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:08:00 crc kubenswrapper[4682]: E1210 12:08:00.392540 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:08:11 crc kubenswrapper[4682]: E1210 12:08:11.384443 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:08:15 crc kubenswrapper[4682]: I1210 12:08:15.383189 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 12:08:15 crc kubenswrapper[4682]: E1210 12:08:15.507930 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 12:08:15 crc kubenswrapper[4682]: E1210 12:08:15.507996 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 12:08:15 crc kubenswrapper[4682]: E1210 12:08:15.508163 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:08:15 crc kubenswrapper[4682]: E1210 12:08:15.509343 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:08:22 crc kubenswrapper[4682]: E1210 12:08:22.484058 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:08:22 crc kubenswrapper[4682]: E1210 12:08:22.484786 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:08:22 crc kubenswrapper[4682]: E1210 12:08:22.485072 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:08:22 crc kubenswrapper[4682]: E1210 12:08:22.486221 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:08:30 crc kubenswrapper[4682]: E1210 12:08:30.383334 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:08:34 crc kubenswrapper[4682]: E1210 12:08:34.383351 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:08:45 crc kubenswrapper[4682]: E1210 12:08:45.382695 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:08:46 crc kubenswrapper[4682]: E1210 12:08:46.383057 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.467526 4682 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xtkrb"] Dec 10 12:08:46 crc kubenswrapper[4682]: E1210 12:08:46.468087 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f93d815e-bf20-4ca5-9ef3-cb2de2293682" containerName="registry-server" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.468106 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="f93d815e-bf20-4ca5-9ef3-cb2de2293682" containerName="registry-server" Dec 10 12:08:46 crc kubenswrapper[4682]: E1210 12:08:46.468155 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f93d815e-bf20-4ca5-9ef3-cb2de2293682" containerName="extract-utilities" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.468165 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="f93d815e-bf20-4ca5-9ef3-cb2de2293682" containerName="extract-utilities" Dec 10 12:08:46 crc kubenswrapper[4682]: E1210 12:08:46.468190 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f93d815e-bf20-4ca5-9ef3-cb2de2293682" containerName="extract-content" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.468199 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="f93d815e-bf20-4ca5-9ef3-cb2de2293682" containerName="extract-content" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.468507 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="f93d815e-bf20-4ca5-9ef3-cb2de2293682" containerName="registry-server" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.470578 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.479443 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xtkrb"] Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.597852 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b71b903-105e-4f96-88eb-0e540b41ec51-utilities\") pod \"community-operators-xtkrb\" (UID: \"5b71b903-105e-4f96-88eb-0e540b41ec51\") " pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.597941 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbjvl\" (UniqueName: \"kubernetes.io/projected/5b71b903-105e-4f96-88eb-0e540b41ec51-kube-api-access-qbjvl\") pod \"community-operators-xtkrb\" (UID: \"5b71b903-105e-4f96-88eb-0e540b41ec51\") " pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.598040 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b71b903-105e-4f96-88eb-0e540b41ec51-catalog-content\") pod \"community-operators-xtkrb\" (UID: \"5b71b903-105e-4f96-88eb-0e540b41ec51\") " pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.699997 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbjvl\" (UniqueName: \"kubernetes.io/projected/5b71b903-105e-4f96-88eb-0e540b41ec51-kube-api-access-qbjvl\") pod \"community-operators-xtkrb\" (UID: \"5b71b903-105e-4f96-88eb-0e540b41ec51\") " pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:46 crc 
kubenswrapper[4682]: I1210 12:08:46.700159 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b71b903-105e-4f96-88eb-0e540b41ec51-catalog-content\") pod \"community-operators-xtkrb\" (UID: \"5b71b903-105e-4f96-88eb-0e540b41ec51\") " pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.700281 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b71b903-105e-4f96-88eb-0e540b41ec51-utilities\") pod \"community-operators-xtkrb\" (UID: \"5b71b903-105e-4f96-88eb-0e540b41ec51\") " pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.700678 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b71b903-105e-4f96-88eb-0e540b41ec51-catalog-content\") pod \"community-operators-xtkrb\" (UID: \"5b71b903-105e-4f96-88eb-0e540b41ec51\") " pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.700906 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b71b903-105e-4f96-88eb-0e540b41ec51-utilities\") pod \"community-operators-xtkrb\" (UID: \"5b71b903-105e-4f96-88eb-0e540b41ec51\") " pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.721990 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbjvl\" (UniqueName: \"kubernetes.io/projected/5b71b903-105e-4f96-88eb-0e540b41ec51-kube-api-access-qbjvl\") pod \"community-operators-xtkrb\" (UID: \"5b71b903-105e-4f96-88eb-0e540b41ec51\") " pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:46 crc kubenswrapper[4682]: I1210 12:08:46.791654 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:47 crc kubenswrapper[4682]: I1210 12:08:47.355143 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xtkrb"] Dec 10 12:08:47 crc kubenswrapper[4682]: I1210 12:08:47.736832 4682 generic.go:334] "Generic (PLEG): container finished" podID="5b71b903-105e-4f96-88eb-0e540b41ec51" containerID="b412658a3967d6bdd1380a833da2e4a98b61f4fdcfd0dcaab80b5abeacdfbe0d" exitCode=0 Dec 10 12:08:47 crc kubenswrapper[4682]: I1210 12:08:47.736887 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xtkrb" event={"ID":"5b71b903-105e-4f96-88eb-0e540b41ec51","Type":"ContainerDied","Data":"b412658a3967d6bdd1380a833da2e4a98b61f4fdcfd0dcaab80b5abeacdfbe0d"} Dec 10 12:08:47 crc kubenswrapper[4682]: I1210 12:08:47.736917 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xtkrb" event={"ID":"5b71b903-105e-4f96-88eb-0e540b41ec51","Type":"ContainerStarted","Data":"47f9f005dbf9fdb1d784c60b19f07247d7e4545f21520697de12a47680b4ee94"} Dec 10 12:08:48 crc kubenswrapper[4682]: I1210 12:08:48.748368 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xtkrb" event={"ID":"5b71b903-105e-4f96-88eb-0e540b41ec51","Type":"ContainerStarted","Data":"2118940a3beefb34334ec5e16fbb168913f51e41766d10982fe49950bbef5372"} Dec 10 12:08:49 crc kubenswrapper[4682]: I1210 12:08:49.758598 4682 generic.go:334] "Generic (PLEG): container finished" podID="5b71b903-105e-4f96-88eb-0e540b41ec51" containerID="2118940a3beefb34334ec5e16fbb168913f51e41766d10982fe49950bbef5372" exitCode=0 Dec 10 12:08:49 crc kubenswrapper[4682]: I1210 12:08:49.758638 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xtkrb" event={"ID":"5b71b903-105e-4f96-88eb-0e540b41ec51","Type":"ContainerDied","Data":"2118940a3beefb34334ec5e16fbb168913f51e41766d10982fe49950bbef5372"} Dec 10 12:08:50 crc kubenswrapper[4682]: I1210 12:08:50.773281 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xtkrb" event={"ID":"5b71b903-105e-4f96-88eb-0e540b41ec51","Type":"ContainerStarted","Data":"a08c93e76d37fecf353a81267be7c2605322236d872c2dfc4c06ec022a45c65b"} Dec 10 12:08:50 crc kubenswrapper[4682]: I1210 12:08:50.802199 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xtkrb" podStartSLOduration=2.35488698 podStartE2EDuration="4.802173773s" podCreationTimestamp="2025-12-10 12:08:46 +0000 UTC" firstStartedPulling="2025-12-10 12:08:47.749235015 +0000 UTC m=+5008.069445765" lastFinishedPulling="2025-12-10 12:08:50.196521768 +0000 UTC m=+5010.516732558" observedRunningTime="2025-12-10 12:08:50.795784862 +0000 UTC m=+5011.115995622" watchObservedRunningTime="2025-12-10 12:08:50.802173773 +0000 UTC m=+5011.122384533" Dec 10 12:08:56 crc kubenswrapper[4682]: I1210 12:08:56.791985 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:56 crc kubenswrapper[4682]: I1210 12:08:56.792676 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:57 crc kubenswrapper[4682]: I1210 12:08:57.043558 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:57 crc kubenswrapper[4682]: I1210 12:08:57.119329 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:08:57 crc kubenswrapper[4682]: E1210 12:08:57.384280 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:08:58 crc kubenswrapper[4682]: E1210 12:08:58.383237 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:09:00 crc kubenswrapper[4682]: I1210 12:09:00.653335 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xtkrb"] Dec 10 12:09:00 crc kubenswrapper[4682]: I1210 12:09:00.654107 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xtkrb" podUID="5b71b903-105e-4f96-88eb-0e540b41ec51" containerName="registry-server" containerID="cri-o://a08c93e76d37fecf353a81267be7c2605322236d872c2dfc4c06ec022a45c65b" gracePeriod=2 Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.795435 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.879636 4682 generic.go:334] "Generic (PLEG): container finished" podID="5b71b903-105e-4f96-88eb-0e540b41ec51" containerID="a08c93e76d37fecf353a81267be7c2605322236d872c2dfc4c06ec022a45c65b" exitCode=0 Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.879682 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xtkrb" Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.879684 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xtkrb" event={"ID":"5b71b903-105e-4f96-88eb-0e540b41ec51","Type":"ContainerDied","Data":"a08c93e76d37fecf353a81267be7c2605322236d872c2dfc4c06ec022a45c65b"} Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.879794 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xtkrb" event={"ID":"5b71b903-105e-4f96-88eb-0e540b41ec51","Type":"ContainerDied","Data":"47f9f005dbf9fdb1d784c60b19f07247d7e4545f21520697de12a47680b4ee94"} Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.879810 4682 scope.go:117] "RemoveContainer" containerID="a08c93e76d37fecf353a81267be7c2605322236d872c2dfc4c06ec022a45c65b" Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.894127 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b71b903-105e-4f96-88eb-0e540b41ec51-utilities\") pod \"5b71b903-105e-4f96-88eb-0e540b41ec51\" (UID: \"5b71b903-105e-4f96-88eb-0e540b41ec51\") " Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.894283 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b71b903-105e-4f96-88eb-0e540b41ec51-catalog-content\") pod \"5b71b903-105e-4f96-88eb-0e540b41ec51\" (UID: \"5b71b903-105e-4f96-88eb-0e540b41ec51\") " Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.894367 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbjvl\" (UniqueName: \"kubernetes.io/projected/5b71b903-105e-4f96-88eb-0e540b41ec51-kube-api-access-qbjvl\") pod \"5b71b903-105e-4f96-88eb-0e540b41ec51\" (UID: \"5b71b903-105e-4f96-88eb-0e540b41ec51\") " Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.896655 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b71b903-105e-4f96-88eb-0e540b41ec51-utilities" (OuterVolumeSpecName: "utilities") pod "5b71b903-105e-4f96-88eb-0e540b41ec51" (UID: "5b71b903-105e-4f96-88eb-0e540b41ec51"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.910140 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b71b903-105e-4f96-88eb-0e540b41ec51-kube-api-access-qbjvl" (OuterVolumeSpecName: "kube-api-access-qbjvl") pod "5b71b903-105e-4f96-88eb-0e540b41ec51" (UID: "5b71b903-105e-4f96-88eb-0e540b41ec51"). InnerVolumeSpecName "kube-api-access-qbjvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.911262 4682 scope.go:117] "RemoveContainer" containerID="2118940a3beefb34334ec5e16fbb168913f51e41766d10982fe49950bbef5372" Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.949838 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b71b903-105e-4f96-88eb-0e540b41ec51-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5b71b903-105e-4f96-88eb-0e540b41ec51" (UID: "5b71b903-105e-4f96-88eb-0e540b41ec51"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.967912 4682 scope.go:117] "RemoveContainer" containerID="b412658a3967d6bdd1380a833da2e4a98b61f4fdcfd0dcaab80b5abeacdfbe0d" Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.996404 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbjvl\" (UniqueName: \"kubernetes.io/projected/5b71b903-105e-4f96-88eb-0e540b41ec51-kube-api-access-qbjvl\") on node \"crc\" DevicePath \"\"" Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.996439 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b71b903-105e-4f96-88eb-0e540b41ec51-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:09:01 crc kubenswrapper[4682]: I1210 12:09:01.996449 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b71b903-105e-4f96-88eb-0e540b41ec51-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:09:02 crc kubenswrapper[4682]: I1210 12:09:02.018490 4682 scope.go:117] "RemoveContainer" containerID="a08c93e76d37fecf353a81267be7c2605322236d872c2dfc4c06ec022a45c65b" Dec 10 12:09:02 crc kubenswrapper[4682]: E1210 12:09:02.018839 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a08c93e76d37fecf353a81267be7c2605322236d872c2dfc4c06ec022a45c65b\": container with ID starting with a08c93e76d37fecf353a81267be7c2605322236d872c2dfc4c06ec022a45c65b not found: ID does not exist" containerID="a08c93e76d37fecf353a81267be7c2605322236d872c2dfc4c06ec022a45c65b" Dec 10 12:09:02 crc kubenswrapper[4682]: I1210 12:09:02.018872 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a08c93e76d37fecf353a81267be7c2605322236d872c2dfc4c06ec022a45c65b"} err="failed to get container status \"a08c93e76d37fecf353a81267be7c2605322236d872c2dfc4c06ec022a45c65b\": rpc error: code = NotFound desc = could not find container \"a08c93e76d37fecf353a81267be7c2605322236d872c2dfc4c06ec022a45c65b\": container with ID starting with a08c93e76d37fecf353a81267be7c2605322236d872c2dfc4c06ec022a45c65b not found: ID does not exist" Dec 10 12:09:02 crc kubenswrapper[4682]: I1210 12:09:02.018894 4682 scope.go:117] "RemoveContainer" containerID="2118940a3beefb34334ec5e16fbb168913f51e41766d10982fe49950bbef5372" Dec 10 12:09:02 crc kubenswrapper[4682]: E1210 12:09:02.019265 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2118940a3beefb34334ec5e16fbb168913f51e41766d10982fe49950bbef5372\": container with ID starting with 2118940a3beefb34334ec5e16fbb168913f51e41766d10982fe49950bbef5372 not found: ID does not exist" containerID="2118940a3beefb34334ec5e16fbb168913f51e41766d10982fe49950bbef5372" Dec 10 12:09:02 crc kubenswrapper[4682]: I1210 12:09:02.019292 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2118940a3beefb34334ec5e16fbb168913f51e41766d10982fe49950bbef5372"} err="failed to get container status \"2118940a3beefb34334ec5e16fbb168913f51e41766d10982fe49950bbef5372\": rpc error: code = NotFound desc = could not find container \"2118940a3beefb34334ec5e16fbb168913f51e41766d10982fe49950bbef5372\": container with ID starting with 2118940a3beefb34334ec5e16fbb168913f51e41766d10982fe49950bbef5372 not found: ID does not exist" Dec 10 12:09:02 crc 
kubenswrapper[4682]: I1210 12:09:02.019307 4682 scope.go:117] "RemoveContainer" containerID="b412658a3967d6bdd1380a833da2e4a98b61f4fdcfd0dcaab80b5abeacdfbe0d" Dec 10 12:09:02 crc kubenswrapper[4682]: E1210 12:09:02.019588 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b412658a3967d6bdd1380a833da2e4a98b61f4fdcfd0dcaab80b5abeacdfbe0d\": container with ID starting with b412658a3967d6bdd1380a833da2e4a98b61f4fdcfd0dcaab80b5abeacdfbe0d not found: ID does not exist" containerID="b412658a3967d6bdd1380a833da2e4a98b61f4fdcfd0dcaab80b5abeacdfbe0d" Dec 10 12:09:02 crc kubenswrapper[4682]: I1210 12:09:02.019608 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b412658a3967d6bdd1380a833da2e4a98b61f4fdcfd0dcaab80b5abeacdfbe0d"} err="failed to get container status \"b412658a3967d6bdd1380a833da2e4a98b61f4fdcfd0dcaab80b5abeacdfbe0d\": rpc error: code = NotFound desc = could not find container \"b412658a3967d6bdd1380a833da2e4a98b61f4fdcfd0dcaab80b5abeacdfbe0d\": container with ID starting with b412658a3967d6bdd1380a833da2e4a98b61f4fdcfd0dcaab80b5abeacdfbe0d not found: ID does not exist" Dec 10 12:09:02 crc kubenswrapper[4682]: I1210 12:09:02.266142 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xtkrb"] Dec 10 12:09:02 crc kubenswrapper[4682]: I1210 12:09:02.276650 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xtkrb"] Dec 10 12:09:02 crc kubenswrapper[4682]: I1210 12:09:02.391708 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b71b903-105e-4f96-88eb-0e540b41ec51" path="/var/lib/kubelet/pods/5b71b903-105e-4f96-88eb-0e540b41ec51/volumes" Dec 10 12:09:06 crc kubenswrapper[4682]: I1210 12:09:06.479136 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:09:06 crc kubenswrapper[4682]: I1210 12:09:06.479782 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:09:11 crc kubenswrapper[4682]: E1210 12:09:11.383345 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:09:11 crc kubenswrapper[4682]: E1210 12:09:11.383758 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:09:24 crc kubenswrapper[4682]: E1210 12:09:24.385443 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:09:25 crc kubenswrapper[4682]: E1210 12:09:25.382485 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:09:36 crc kubenswrapper[4682]: E1210 12:09:36.382516 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:09:36 crc kubenswrapper[4682]: I1210 12:09:36.478276 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:09:36 crc kubenswrapper[4682]: I1210 12:09:36.478345 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:09:39 crc kubenswrapper[4682]: E1210 12:09:39.383128 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:09:47 crc kubenswrapper[4682]: E1210 12:09:47.383639 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:09:53 crc kubenswrapper[4682]: E1210 12:09:53.383559 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:09:59 crc kubenswrapper[4682]: E1210 12:09:59.382644 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:10:04 crc kubenswrapper[4682]: E1210 12:10:04.383389 4682 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:10:06 crc kubenswrapper[4682]: I1210 12:10:06.479094 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:10:06 crc kubenswrapper[4682]: I1210 12:10:06.479641 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:10:06 crc kubenswrapper[4682]: I1210 12:10:06.479715 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 12:10:06 crc kubenswrapper[4682]: I1210 12:10:06.480760 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 12:10:06 crc kubenswrapper[4682]: I1210 12:10:06.480891 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" gracePeriod=600 Dec 10 12:10:06 crc kubenswrapper[4682]: E1210 12:10:06.601155 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:10:07 crc kubenswrapper[4682]: I1210 12:10:07.542126 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" exitCode=0 Dec 10 12:10:07 crc kubenswrapper[4682]: I1210 12:10:07.542169 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20"} Dec 10 12:10:07 crc kubenswrapper[4682]: I1210 12:10:07.542222 4682 scope.go:117] "RemoveContainer" containerID="08b0f21e5f85b40aa7cb0d289080af87abc18c46b8cd43c7b4bd2e5e8b33365f" Dec 10 12:10:07 crc kubenswrapper[4682]: I1210 12:10:07.542911 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 
12:10:07 crc kubenswrapper[4682]: E1210 12:10:07.543155 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:10:13 crc kubenswrapper[4682]: E1210 12:10:13.383337 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:10:19 crc kubenswrapper[4682]: I1210 12:10:19.381675 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:10:19 crc kubenswrapper[4682]: E1210 12:10:19.383626 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:10:19 crc kubenswrapper[4682]: E1210 12:10:19.385895 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:10:25 crc kubenswrapper[4682]: E1210 12:10:25.382869 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:10:30 crc kubenswrapper[4682]: I1210 12:10:30.391011 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:10:30 crc kubenswrapper[4682]: E1210 12:10:30.391964 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:10:34 crc kubenswrapper[4682]: E1210 12:10:34.382667 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:10:39 crc kubenswrapper[4682]: E1210 12:10:39.396675 4682 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:10:43 crc kubenswrapper[4682]: I1210 12:10:43.381412 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:10:43 crc kubenswrapper[4682]: E1210 12:10:43.382156 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:10:45 crc kubenswrapper[4682]: E1210 12:10:45.384279 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:10:52 crc kubenswrapper[4682]: E1210 12:10:52.383070 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:10:56 crc kubenswrapper[4682]: E1210 12:10:56.384672 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:10:57 crc kubenswrapper[4682]: I1210 12:10:57.380638 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:10:57 crc kubenswrapper[4682]: E1210 12:10:57.381241 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:11:07 crc kubenswrapper[4682]: E1210 12:11:07.383333 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:11:10 crc kubenswrapper[4682]: I1210 12:11:10.402111 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:11:10 crc kubenswrapper[4682]: E1210 
12:11:10.403037 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:11:11 crc kubenswrapper[4682]: E1210 12:11:11.386746 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:11:21 crc kubenswrapper[4682]: I1210 12:11:21.381032 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:11:21 crc kubenswrapper[4682]: E1210 12:11:21.383967 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:11:21 crc kubenswrapper[4682]: E1210 12:11:21.384132 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:11:25 crc kubenswrapper[4682]: E1210 12:11:25.383893 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:11:34 crc kubenswrapper[4682]: E1210 12:11:34.383801 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:11:35 crc kubenswrapper[4682]: I1210 12:11:35.576464 4682 generic.go:334] "Generic (PLEG): container finished" podID="11a38c1a-3a98-4c77-82ff-caf76c15fefc" containerID="a6a656420a2248541a05ff3a17035afb06addabffac910809939c19800346bf5" exitCode=2 Dec 10 12:11:35 crc kubenswrapper[4682]: I1210 12:11:35.576528 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" event={"ID":"11a38c1a-3a98-4c77-82ff-caf76c15fefc","Type":"ContainerDied","Data":"a6a656420a2248541a05ff3a17035afb06addabffac910809939c19800346bf5"} Dec 10 12:11:36 crc kubenswrapper[4682]: I1210 12:11:36.382208 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:11:36 crc 
kubenswrapper[4682]: E1210 12:11:36.382848 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:11:37 crc kubenswrapper[4682]: I1210 12:11:37.150532 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" Dec 10 12:11:37 crc kubenswrapper[4682]: I1210 12:11:37.275917 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/11a38c1a-3a98-4c77-82ff-caf76c15fefc-inventory\") pod \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\" (UID: \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\") " Dec 10 12:11:37 crc kubenswrapper[4682]: I1210 12:11:37.276263 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zklg\" (UniqueName: \"kubernetes.io/projected/11a38c1a-3a98-4c77-82ff-caf76c15fefc-kube-api-access-2zklg\") pod \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\" (UID: \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\") " Dec 10 12:11:37 crc kubenswrapper[4682]: I1210 12:11:37.276371 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/11a38c1a-3a98-4c77-82ff-caf76c15fefc-ssh-key\") pod \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\" (UID: \"11a38c1a-3a98-4c77-82ff-caf76c15fefc\") " Dec 10 12:11:37 crc kubenswrapper[4682]: I1210 12:11:37.282700 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11a38c1a-3a98-4c77-82ff-caf76c15fefc-kube-api-access-2zklg" (OuterVolumeSpecName: "kube-api-access-2zklg") pod "11a38c1a-3a98-4c77-82ff-caf76c15fefc" (UID: "11a38c1a-3a98-4c77-82ff-caf76c15fefc"). InnerVolumeSpecName "kube-api-access-2zklg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:11:37 crc kubenswrapper[4682]: I1210 12:11:37.304791 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11a38c1a-3a98-4c77-82ff-caf76c15fefc-inventory" (OuterVolumeSpecName: "inventory") pod "11a38c1a-3a98-4c77-82ff-caf76c15fefc" (UID: "11a38c1a-3a98-4c77-82ff-caf76c15fefc"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:11:37 crc kubenswrapper[4682]: I1210 12:11:37.305540 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11a38c1a-3a98-4c77-82ff-caf76c15fefc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "11a38c1a-3a98-4c77-82ff-caf76c15fefc" (UID: "11a38c1a-3a98-4c77-82ff-caf76c15fefc"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:11:37 crc kubenswrapper[4682]: I1210 12:11:37.378689 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2zklg\" (UniqueName: \"kubernetes.io/projected/11a38c1a-3a98-4c77-82ff-caf76c15fefc-kube-api-access-2zklg\") on node \"crc\" DevicePath \"\"" Dec 10 12:11:37 crc kubenswrapper[4682]: I1210 12:11:37.378721 4682 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/11a38c1a-3a98-4c77-82ff-caf76c15fefc-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 12:11:37 crc kubenswrapper[4682]: I1210 12:11:37.378732 4682 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/11a38c1a-3a98-4c77-82ff-caf76c15fefc-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 12:11:37 crc kubenswrapper[4682]: I1210 12:11:37.604507 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" event={"ID":"11a38c1a-3a98-4c77-82ff-caf76c15fefc","Type":"ContainerDied","Data":"676ea4a1b1c584b7e14e93a778abf5e47a4e8042a62f2ba154a82958c248caad"} Dec 10 12:11:37 crc kubenswrapper[4682]: I1210 12:11:37.604546 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w" Dec 10 12:11:37 crc kubenswrapper[4682]: I1210 12:11:37.604553 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="676ea4a1b1c584b7e14e93a778abf5e47a4e8042a62f2ba154a82958c248caad" Dec 10 12:11:38 crc kubenswrapper[4682]: E1210 12:11:38.387258 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:11:45 crc kubenswrapper[4682]: E1210 12:11:45.383636 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:11:50 crc kubenswrapper[4682]: I1210 12:11:50.397964 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:11:50 crc kubenswrapper[4682]: E1210 12:11:50.398826 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:11:52 crc kubenswrapper[4682]: E1210 12:11:52.382991 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:11:56 crc 
kubenswrapper[4682]: E1210 12:11:56.385062 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.051067 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-z8nb5/must-gather-nnl6v"] Dec 10 12:12:01 crc kubenswrapper[4682]: E1210 12:12:01.052110 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b71b903-105e-4f96-88eb-0e540b41ec51" containerName="extract-content" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.052127 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b71b903-105e-4f96-88eb-0e540b41ec51" containerName="extract-content" Dec 10 12:12:01 crc kubenswrapper[4682]: E1210 12:12:01.052139 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b71b903-105e-4f96-88eb-0e540b41ec51" containerName="registry-server" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.052146 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b71b903-105e-4f96-88eb-0e540b41ec51" containerName="registry-server" Dec 10 12:12:01 crc kubenswrapper[4682]: E1210 12:12:01.052181 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11a38c1a-3a98-4c77-82ff-caf76c15fefc" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.052190 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="11a38c1a-3a98-4c77-82ff-caf76c15fefc" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 12:12:01 crc kubenswrapper[4682]: E1210 12:12:01.052204 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b71b903-105e-4f96-88eb-0e540b41ec51" containerName="extract-utilities" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.052211 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b71b903-105e-4f96-88eb-0e540b41ec51" containerName="extract-utilities" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.052532 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="11a38c1a-3a98-4c77-82ff-caf76c15fefc" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.052575 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b71b903-105e-4f96-88eb-0e540b41ec51" containerName="registry-server" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.054367 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-z8nb5/must-gather-nnl6v" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.058717 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-z8nb5"/"openshift-service-ca.crt" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.059145 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-z8nb5"/"kube-root-ca.crt" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.059429 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-z8nb5"/"default-dockercfg-nxvgr" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.067967 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-z8nb5/must-gather-nnl6v"] Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.185737 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzh6x\" (UniqueName: \"kubernetes.io/projected/bcfca302-1a28-4fe3-b059-96f1b8b41aff-kube-api-access-xzh6x\") pod \"must-gather-nnl6v\" (UID: \"bcfca302-1a28-4fe3-b059-96f1b8b41aff\") " pod="openshift-must-gather-z8nb5/must-gather-nnl6v" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.185994 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/bcfca302-1a28-4fe3-b059-96f1b8b41aff-must-gather-output\") pod \"must-gather-nnl6v\" (UID: \"bcfca302-1a28-4fe3-b059-96f1b8b41aff\") " pod="openshift-must-gather-z8nb5/must-gather-nnl6v" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.288204 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/bcfca302-1a28-4fe3-b059-96f1b8b41aff-must-gather-output\") pod \"must-gather-nnl6v\" (UID: \"bcfca302-1a28-4fe3-b059-96f1b8b41aff\") " pod="openshift-must-gather-z8nb5/must-gather-nnl6v" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.288325 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzh6x\" (UniqueName: \"kubernetes.io/projected/bcfca302-1a28-4fe3-b059-96f1b8b41aff-kube-api-access-xzh6x\") pod \"must-gather-nnl6v\" (UID: \"bcfca302-1a28-4fe3-b059-96f1b8b41aff\") " pod="openshift-must-gather-z8nb5/must-gather-nnl6v" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.288855 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/bcfca302-1a28-4fe3-b059-96f1b8b41aff-must-gather-output\") pod \"must-gather-nnl6v\" (UID: \"bcfca302-1a28-4fe3-b059-96f1b8b41aff\") " pod="openshift-must-gather-z8nb5/must-gather-nnl6v" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.325032 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzh6x\" (UniqueName: \"kubernetes.io/projected/bcfca302-1a28-4fe3-b059-96f1b8b41aff-kube-api-access-xzh6x\") pod \"must-gather-nnl6v\" (UID: \"bcfca302-1a28-4fe3-b059-96f1b8b41aff\") " pod="openshift-must-gather-z8nb5/must-gather-nnl6v" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.379390 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-z8nb5/must-gather-nnl6v" Dec 10 12:12:01 crc kubenswrapper[4682]: I1210 12:12:01.900973 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-z8nb5/must-gather-nnl6v"] Dec 10 12:12:02 crc kubenswrapper[4682]: I1210 12:12:02.859281 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z8nb5/must-gather-nnl6v" event={"ID":"bcfca302-1a28-4fe3-b059-96f1b8b41aff","Type":"ContainerStarted","Data":"6d47753ae16c8586a2da2394f844cea772b9fe00a4439f6e6a373aa34e164a27"} Dec 10 12:12:04 crc kubenswrapper[4682]: I1210 12:12:04.381308 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:12:04 crc kubenswrapper[4682]: E1210 12:12:04.382120 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:12:04 crc kubenswrapper[4682]: E1210 12:12:04.385616 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:12:09 crc kubenswrapper[4682]: E1210 12:12:09.383409 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:12:09 crc kubenswrapper[4682]: I1210 12:12:09.934387 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z8nb5/must-gather-nnl6v" event={"ID":"bcfca302-1a28-4fe3-b059-96f1b8b41aff","Type":"ContainerStarted","Data":"02b8458220c53c7f25d06903d7e9ba4378c957a630c0b3fa70818eb06bd10559"} Dec 10 12:12:09 crc kubenswrapper[4682]: I1210 12:12:09.934429 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z8nb5/must-gather-nnl6v" event={"ID":"bcfca302-1a28-4fe3-b059-96f1b8b41aff","Type":"ContainerStarted","Data":"a5ea6fd590620139b87675b071b001a4be00b6363cef627d1109f66785049365"} Dec 10 12:12:09 crc kubenswrapper[4682]: I1210 12:12:09.952030 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-z8nb5/must-gather-nnl6v" podStartSLOduration=1.79734201 podStartE2EDuration="8.95201132s" podCreationTimestamp="2025-12-10 12:12:01 +0000 UTC" firstStartedPulling="2025-12-10 12:12:01.906788904 +0000 UTC m=+5202.226999654" lastFinishedPulling="2025-12-10 12:12:09.061458214 +0000 UTC m=+5209.381668964" observedRunningTime="2025-12-10 12:12:09.950171861 +0000 UTC m=+5210.270382641" watchObservedRunningTime="2025-12-10 12:12:09.95201132 +0000 UTC m=+5210.272222070" Dec 10 12:12:15 crc kubenswrapper[4682]: I1210 12:12:15.586757 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-z8nb5/crc-debug-4p8mm"] Dec 10 12:12:15 crc kubenswrapper[4682]: I1210 
12:12:15.588462 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-z8nb5/crc-debug-4p8mm" Dec 10 12:12:15 crc kubenswrapper[4682]: I1210 12:12:15.733212 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4a331fab-c45e-485a-a601-f52e14b0c6f1-host\") pod \"crc-debug-4p8mm\" (UID: \"4a331fab-c45e-485a-a601-f52e14b0c6f1\") " pod="openshift-must-gather-z8nb5/crc-debug-4p8mm" Dec 10 12:12:15 crc kubenswrapper[4682]: I1210 12:12:15.733551 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shhvn\" (UniqueName: \"kubernetes.io/projected/4a331fab-c45e-485a-a601-f52e14b0c6f1-kube-api-access-shhvn\") pod \"crc-debug-4p8mm\" (UID: \"4a331fab-c45e-485a-a601-f52e14b0c6f1\") " pod="openshift-must-gather-z8nb5/crc-debug-4p8mm" Dec 10 12:12:15 crc kubenswrapper[4682]: I1210 12:12:15.835803 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4a331fab-c45e-485a-a601-f52e14b0c6f1-host\") pod \"crc-debug-4p8mm\" (UID: \"4a331fab-c45e-485a-a601-f52e14b0c6f1\") " pod="openshift-must-gather-z8nb5/crc-debug-4p8mm" Dec 10 12:12:15 crc kubenswrapper[4682]: I1210 12:12:15.836095 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shhvn\" (UniqueName: \"kubernetes.io/projected/4a331fab-c45e-485a-a601-f52e14b0c6f1-kube-api-access-shhvn\") pod \"crc-debug-4p8mm\" (UID: \"4a331fab-c45e-485a-a601-f52e14b0c6f1\") " pod="openshift-must-gather-z8nb5/crc-debug-4p8mm" Dec 10 12:12:15 crc kubenswrapper[4682]: I1210 12:12:15.835935 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4a331fab-c45e-485a-a601-f52e14b0c6f1-host\") pod \"crc-debug-4p8mm\" (UID: \"4a331fab-c45e-485a-a601-f52e14b0c6f1\") " pod="openshift-must-gather-z8nb5/crc-debug-4p8mm" Dec 10 12:12:16 crc kubenswrapper[4682]: I1210 12:12:16.190063 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shhvn\" (UniqueName: \"kubernetes.io/projected/4a331fab-c45e-485a-a601-f52e14b0c6f1-kube-api-access-shhvn\") pod \"crc-debug-4p8mm\" (UID: \"4a331fab-c45e-485a-a601-f52e14b0c6f1\") " pod="openshift-must-gather-z8nb5/crc-debug-4p8mm" Dec 10 12:12:16 crc kubenswrapper[4682]: I1210 12:12:16.206187 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-z8nb5/crc-debug-4p8mm" Dec 10 12:12:16 crc kubenswrapper[4682]: W1210 12:12:16.236614 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4a331fab_c45e_485a_a601_f52e14b0c6f1.slice/crio-94bb8136d582c42839bfc12cb375af457c2dc9d8d77d9c5d1da09a0299a212b6 WatchSource:0}: Error finding container 94bb8136d582c42839bfc12cb375af457c2dc9d8d77d9c5d1da09a0299a212b6: Status 404 returned error can't find the container with id 94bb8136d582c42839bfc12cb375af457c2dc9d8d77d9c5d1da09a0299a212b6 Dec 10 12:12:17 crc kubenswrapper[4682]: I1210 12:12:17.000296 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z8nb5/crc-debug-4p8mm" event={"ID":"4a331fab-c45e-485a-a601-f52e14b0c6f1","Type":"ContainerStarted","Data":"94bb8136d582c42839bfc12cb375af457c2dc9d8d77d9c5d1da09a0299a212b6"} Dec 10 12:12:17 crc kubenswrapper[4682]: E1210 12:12:17.383799 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:12:19 crc kubenswrapper[4682]: I1210 12:12:19.382320 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:12:19 crc kubenswrapper[4682]: E1210 12:12:19.383345 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:12:22 crc kubenswrapper[4682]: E1210 12:12:22.385334 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:12:29 crc kubenswrapper[4682]: I1210 12:12:29.117122 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z8nb5/crc-debug-4p8mm" event={"ID":"4a331fab-c45e-485a-a601-f52e14b0c6f1","Type":"ContainerStarted","Data":"23c05d53a2c8d960449d304ca7360b713dbaa2bdf9e155223ce0001ed1b9cded"} Dec 10 12:12:29 crc kubenswrapper[4682]: I1210 12:12:29.140443 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-z8nb5/crc-debug-4p8mm" podStartSLOduration=1.767560588 podStartE2EDuration="14.140420698s" podCreationTimestamp="2025-12-10 12:12:15 +0000 UTC" firstStartedPulling="2025-12-10 12:12:16.238831065 +0000 UTC m=+5216.559041815" lastFinishedPulling="2025-12-10 12:12:28.611691175 +0000 UTC m=+5228.931901925" observedRunningTime="2025-12-10 12:12:29.137567489 +0000 UTC m=+5229.457778239" watchObservedRunningTime="2025-12-10 12:12:29.140420698 +0000 UTC m=+5229.460631448" Dec 10 12:12:30 crc kubenswrapper[4682]: I1210 12:12:30.387188 4682 scope.go:117] "RemoveContainer" 
containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:12:30 crc kubenswrapper[4682]: E1210 12:12:30.387726 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:12:31 crc kubenswrapper[4682]: E1210 12:12:31.383434 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:12:33 crc kubenswrapper[4682]: E1210 12:12:33.383159 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:12:45 crc kubenswrapper[4682]: I1210 12:12:45.381652 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:12:45 crc kubenswrapper[4682]: E1210 12:12:45.382321 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:12:45 crc kubenswrapper[4682]: E1210 12:12:45.386788 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:12:46 crc kubenswrapper[4682]: E1210 12:12:46.384406 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:12:51 crc kubenswrapper[4682]: I1210 12:12:51.336202 4682 generic.go:334] "Generic (PLEG): container finished" podID="4a331fab-c45e-485a-a601-f52e14b0c6f1" containerID="23c05d53a2c8d960449d304ca7360b713dbaa2bdf9e155223ce0001ed1b9cded" exitCode=0 Dec 10 12:12:51 crc kubenswrapper[4682]: I1210 12:12:51.336301 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z8nb5/crc-debug-4p8mm" event={"ID":"4a331fab-c45e-485a-a601-f52e14b0c6f1","Type":"ContainerDied","Data":"23c05d53a2c8d960449d304ca7360b713dbaa2bdf9e155223ce0001ed1b9cded"} Dec 10 12:12:52 crc kubenswrapper[4682]: I1210 12:12:52.495016 4682 util.go:48] "No ready sandbox 
for pod can be found. Need to start a new one" pod="openshift-must-gather-z8nb5/crc-debug-4p8mm" Dec 10 12:12:52 crc kubenswrapper[4682]: I1210 12:12:52.532942 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-z8nb5/crc-debug-4p8mm"] Dec 10 12:12:52 crc kubenswrapper[4682]: I1210 12:12:52.544675 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-z8nb5/crc-debug-4p8mm"] Dec 10 12:12:52 crc kubenswrapper[4682]: I1210 12:12:52.656313 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shhvn\" (UniqueName: \"kubernetes.io/projected/4a331fab-c45e-485a-a601-f52e14b0c6f1-kube-api-access-shhvn\") pod \"4a331fab-c45e-485a-a601-f52e14b0c6f1\" (UID: \"4a331fab-c45e-485a-a601-f52e14b0c6f1\") " Dec 10 12:12:52 crc kubenswrapper[4682]: I1210 12:12:52.656654 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4a331fab-c45e-485a-a601-f52e14b0c6f1-host\") pod \"4a331fab-c45e-485a-a601-f52e14b0c6f1\" (UID: \"4a331fab-c45e-485a-a601-f52e14b0c6f1\") " Dec 10 12:12:52 crc kubenswrapper[4682]: I1210 12:12:52.656839 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4a331fab-c45e-485a-a601-f52e14b0c6f1-host" (OuterVolumeSpecName: "host") pod "4a331fab-c45e-485a-a601-f52e14b0c6f1" (UID: "4a331fab-c45e-485a-a601-f52e14b0c6f1"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 12:12:52 crc kubenswrapper[4682]: I1210 12:12:52.657129 4682 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4a331fab-c45e-485a-a601-f52e14b0c6f1-host\") on node \"crc\" DevicePath \"\"" Dec 10 12:12:52 crc kubenswrapper[4682]: I1210 12:12:52.671735 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a331fab-c45e-485a-a601-f52e14b0c6f1-kube-api-access-shhvn" (OuterVolumeSpecName: "kube-api-access-shhvn") pod "4a331fab-c45e-485a-a601-f52e14b0c6f1" (UID: "4a331fab-c45e-485a-a601-f52e14b0c6f1"). InnerVolumeSpecName "kube-api-access-shhvn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:12:52 crc kubenswrapper[4682]: I1210 12:12:52.758797 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shhvn\" (UniqueName: \"kubernetes.io/projected/4a331fab-c45e-485a-a601-f52e14b0c6f1-kube-api-access-shhvn\") on node \"crc\" DevicePath \"\"" Dec 10 12:12:53 crc kubenswrapper[4682]: I1210 12:12:53.361249 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="94bb8136d582c42839bfc12cb375af457c2dc9d8d77d9c5d1da09a0299a212b6" Dec 10 12:12:53 crc kubenswrapper[4682]: I1210 12:12:53.361332 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-z8nb5/crc-debug-4p8mm" Dec 10 12:12:53 crc kubenswrapper[4682]: I1210 12:12:53.769798 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-z8nb5/crc-debug-pst74"] Dec 10 12:12:53 crc kubenswrapper[4682]: E1210 12:12:53.770268 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a331fab-c45e-485a-a601-f52e14b0c6f1" containerName="container-00" Dec 10 12:12:53 crc kubenswrapper[4682]: I1210 12:12:53.770282 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a331fab-c45e-485a-a601-f52e14b0c6f1" containerName="container-00" Dec 10 12:12:53 crc kubenswrapper[4682]: I1210 12:12:53.770532 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a331fab-c45e-485a-a601-f52e14b0c6f1" containerName="container-00" Dec 10 12:12:53 crc kubenswrapper[4682]: I1210 12:12:53.771286 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-z8nb5/crc-debug-pst74" Dec 10 12:12:53 crc kubenswrapper[4682]: I1210 12:12:53.883673 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cbxn\" (UniqueName: \"kubernetes.io/projected/a5551819-9e19-4ff1-adeb-54f3bad82f0f-kube-api-access-6cbxn\") pod \"crc-debug-pst74\" (UID: \"a5551819-9e19-4ff1-adeb-54f3bad82f0f\") " pod="openshift-must-gather-z8nb5/crc-debug-pst74" Dec 10 12:12:53 crc kubenswrapper[4682]: I1210 12:12:53.884044 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a5551819-9e19-4ff1-adeb-54f3bad82f0f-host\") pod \"crc-debug-pst74\" (UID: \"a5551819-9e19-4ff1-adeb-54f3bad82f0f\") " pod="openshift-must-gather-z8nb5/crc-debug-pst74" Dec 10 12:12:53 crc kubenswrapper[4682]: I1210 12:12:53.986336 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cbxn\" (UniqueName: \"kubernetes.io/projected/a5551819-9e19-4ff1-adeb-54f3bad82f0f-kube-api-access-6cbxn\") pod \"crc-debug-pst74\" (UID: \"a5551819-9e19-4ff1-adeb-54f3bad82f0f\") " pod="openshift-must-gather-z8nb5/crc-debug-pst74" Dec 10 12:12:53 crc kubenswrapper[4682]: I1210 12:12:53.986489 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a5551819-9e19-4ff1-adeb-54f3bad82f0f-host\") pod \"crc-debug-pst74\" (UID: \"a5551819-9e19-4ff1-adeb-54f3bad82f0f\") " pod="openshift-must-gather-z8nb5/crc-debug-pst74" Dec 10 12:12:53 crc kubenswrapper[4682]: I1210 12:12:53.986641 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a5551819-9e19-4ff1-adeb-54f3bad82f0f-host\") pod \"crc-debug-pst74\" (UID: \"a5551819-9e19-4ff1-adeb-54f3bad82f0f\") " pod="openshift-must-gather-z8nb5/crc-debug-pst74" Dec 10 12:12:54 crc kubenswrapper[4682]: I1210 12:12:54.004846 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cbxn\" (UniqueName: \"kubernetes.io/projected/a5551819-9e19-4ff1-adeb-54f3bad82f0f-kube-api-access-6cbxn\") pod \"crc-debug-pst74\" (UID: \"a5551819-9e19-4ff1-adeb-54f3bad82f0f\") " pod="openshift-must-gather-z8nb5/crc-debug-pst74" Dec 10 12:12:54 crc kubenswrapper[4682]: I1210 12:12:54.093517 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-z8nb5/crc-debug-pst74" Dec 10 12:12:54 crc kubenswrapper[4682]: I1210 12:12:54.371441 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z8nb5/crc-debug-pst74" event={"ID":"a5551819-9e19-4ff1-adeb-54f3bad82f0f","Type":"ContainerStarted","Data":"85d8376814eee31a4a5703c6d0805eb20618d3b4216c57ef896a2909ee7d71f0"} Dec 10 12:12:54 crc kubenswrapper[4682]: I1210 12:12:54.394172 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a331fab-c45e-485a-a601-f52e14b0c6f1" path="/var/lib/kubelet/pods/4a331fab-c45e-485a-a601-f52e14b0c6f1/volumes" Dec 10 12:12:55 crc kubenswrapper[4682]: I1210 12:12:55.383244 4682 generic.go:334] "Generic (PLEG): container finished" podID="a5551819-9e19-4ff1-adeb-54f3bad82f0f" containerID="0b4efef97628443333a62d44f1d38fb1da5e91f30fa1fd37df9630e94ac85e3a" exitCode=1 Dec 10 12:12:55 crc kubenswrapper[4682]: I1210 12:12:55.383298 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z8nb5/crc-debug-pst74" event={"ID":"a5551819-9e19-4ff1-adeb-54f3bad82f0f","Type":"ContainerDied","Data":"0b4efef97628443333a62d44f1d38fb1da5e91f30fa1fd37df9630e94ac85e3a"} Dec 10 12:12:55 crc kubenswrapper[4682]: I1210 12:12:55.428239 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-z8nb5/crc-debug-pst74"] Dec 10 12:12:55 crc kubenswrapper[4682]: I1210 12:12:55.438606 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-z8nb5/crc-debug-pst74"] Dec 10 12:12:56 crc kubenswrapper[4682]: I1210 12:12:56.521959 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-z8nb5/crc-debug-pst74" Dec 10 12:12:56 crc kubenswrapper[4682]: I1210 12:12:56.586878 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a5551819-9e19-4ff1-adeb-54f3bad82f0f-host\") pod \"a5551819-9e19-4ff1-adeb-54f3bad82f0f\" (UID: \"a5551819-9e19-4ff1-adeb-54f3bad82f0f\") " Dec 10 12:12:56 crc kubenswrapper[4682]: I1210 12:12:56.587003 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a5551819-9e19-4ff1-adeb-54f3bad82f0f-host" (OuterVolumeSpecName: "host") pod "a5551819-9e19-4ff1-adeb-54f3bad82f0f" (UID: "a5551819-9e19-4ff1-adeb-54f3bad82f0f"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 12:12:56 crc kubenswrapper[4682]: I1210 12:12:56.587488 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cbxn\" (UniqueName: \"kubernetes.io/projected/a5551819-9e19-4ff1-adeb-54f3bad82f0f-kube-api-access-6cbxn\") pod \"a5551819-9e19-4ff1-adeb-54f3bad82f0f\" (UID: \"a5551819-9e19-4ff1-adeb-54f3bad82f0f\") " Dec 10 12:12:56 crc kubenswrapper[4682]: I1210 12:12:56.587961 4682 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a5551819-9e19-4ff1-adeb-54f3bad82f0f-host\") on node \"crc\" DevicePath \"\"" Dec 10 12:12:56 crc kubenswrapper[4682]: I1210 12:12:56.603201 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5551819-9e19-4ff1-adeb-54f3bad82f0f-kube-api-access-6cbxn" (OuterVolumeSpecName: "kube-api-access-6cbxn") pod "a5551819-9e19-4ff1-adeb-54f3bad82f0f" (UID: "a5551819-9e19-4ff1-adeb-54f3bad82f0f"). InnerVolumeSpecName "kube-api-access-6cbxn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:12:56 crc kubenswrapper[4682]: I1210 12:12:56.689739 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cbxn\" (UniqueName: \"kubernetes.io/projected/a5551819-9e19-4ff1-adeb-54f3bad82f0f-kube-api-access-6cbxn\") on node \"crc\" DevicePath \"\"" Dec 10 12:12:57 crc kubenswrapper[4682]: E1210 12:12:57.382980 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:12:57 crc kubenswrapper[4682]: I1210 12:12:57.407004 4682 scope.go:117] "RemoveContainer" containerID="0b4efef97628443333a62d44f1d38fb1da5e91f30fa1fd37df9630e94ac85e3a" Dec 10 12:12:57 crc kubenswrapper[4682]: I1210 12:12:57.407070 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-z8nb5/crc-debug-pst74" Dec 10 12:12:58 crc kubenswrapper[4682]: E1210 12:12:58.383238 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:12:58 crc kubenswrapper[4682]: I1210 12:12:58.394215 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5551819-9e19-4ff1-adeb-54f3bad82f0f" path="/var/lib/kubelet/pods/a5551819-9e19-4ff1-adeb-54f3bad82f0f/volumes" Dec 10 12:12:59 crc kubenswrapper[4682]: I1210 12:12:59.382020 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:12:59 crc kubenswrapper[4682]: E1210 12:12:59.382685 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:13:09 crc kubenswrapper[4682]: E1210 12:13:09.383316 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:13:11 crc kubenswrapper[4682]: E1210 12:13:11.383092 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:13:13 crc kubenswrapper[4682]: I1210 12:13:13.380996 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:13:13 crc kubenswrapper[4682]: E1210 12:13:13.381474 4682 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:13:20 crc kubenswrapper[4682]: E1210 12:13:20.394437 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:13:23 crc kubenswrapper[4682]: I1210 12:13:23.383539 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 12:13:23 crc kubenswrapper[4682]: E1210 12:13:23.511530 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 12:13:23 crc kubenswrapper[4682]: E1210 12:13:23.511604 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 12:13:23 crc kubenswrapper[4682]: E1210 12:13:23.511766 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:13:23 crc kubenswrapper[4682]: E1210 12:13:23.513156 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:13:27 crc kubenswrapper[4682]: I1210 12:13:27.381271 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:13:27 crc kubenswrapper[4682]: E1210 12:13:27.381913 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:13:34 crc kubenswrapper[4682]: E1210 12:13:34.509281 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:13:34 crc kubenswrapper[4682]: E1210 12:13:34.509825 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:13:34 crc kubenswrapper[4682]: E1210 12:13:34.509938 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:13:34 crc kubenswrapper[4682]: E1210 12:13:34.511116 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:13:37 crc kubenswrapper[4682]: E1210 12:13:37.384226 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:13:38 crc kubenswrapper[4682]: I1210 12:13:38.381978 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:13:38 crc kubenswrapper[4682]: E1210 12:13:38.382263 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:13:41 crc kubenswrapper[4682]: I1210 12:13:41.623544 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_55723944-339e-4ed6-9159-9696ca1debeb/init-config-reloader/0.log" Dec 10 12:13:41 crc kubenswrapper[4682]: I1210 12:13:41.795287 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_55723944-339e-4ed6-9159-9696ca1debeb/init-config-reloader/0.log" Dec 10 12:13:41 crc kubenswrapper[4682]: I1210 12:13:41.815655 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_55723944-339e-4ed6-9159-9696ca1debeb/alertmanager/0.log" Dec 10 12:13:41 crc kubenswrapper[4682]: I1210 12:13:41.844035 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_55723944-339e-4ed6-9159-9696ca1debeb/config-reloader/0.log" Dec 10 12:13:41 crc kubenswrapper[4682]: I1210 12:13:41.985879 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-f48d9bb94-4d9lk_2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f/barbican-api/0.log" Dec 10 12:13:42 crc kubenswrapper[4682]: I1210 12:13:42.059622 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-f48d9bb94-4d9lk_2ecd162a-a0b4-4cbc-9e5a-35a2c24d524f/barbican-api-log/0.log" Dec 10 12:13:42 crc kubenswrapper[4682]: I1210 12:13:42.116984 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6b5dc89858-bj5qb_4f04d5f7-0e27-4de0-83c3-10a07dcbc97d/barbican-keystone-listener/0.log" Dec 10 12:13:42 crc kubenswrapper[4682]: I1210 12:13:42.227678 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6b5dc89858-bj5qb_4f04d5f7-0e27-4de0-83c3-10a07dcbc97d/barbican-keystone-listener-log/0.log" Dec 10 12:13:42 crc kubenswrapper[4682]: I1210 12:13:42.308431 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-84898968bc-8j5tc_8b0a3dc4-5e16-4425-b932-e58a3cd2295a/barbican-worker/0.log" Dec 10 12:13:42 crc kubenswrapper[4682]: I1210 12:13:42.375249 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-84898968bc-8j5tc_8b0a3dc4-5e16-4425-b932-e58a3cd2295a/barbican-worker-log/0.log" Dec 10 12:13:42 crc 
kubenswrapper[4682]: I1210 12:13:42.571825 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-qvzpm_a02eab3d-1fa5-4960-bf40-d9822a5c9122/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:13:42 crc kubenswrapper[4682]: I1210 12:13:42.725876 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_58163ec6-c74c-4db2-aad7-c5f598a75856/ceilometer-notification-agent/0.log" Dec 10 12:13:42 crc kubenswrapper[4682]: I1210 12:13:42.760835 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_58163ec6-c74c-4db2-aad7-c5f598a75856/sg-core/0.log" Dec 10 12:13:42 crc kubenswrapper[4682]: I1210 12:13:42.770875 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_58163ec6-c74c-4db2-aad7-c5f598a75856/proxy-httpd/0.log" Dec 10 12:13:42 crc kubenswrapper[4682]: I1210 12:13:42.954192 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_393918ae-0996-472c-9f98-1862109d9f54/cinder-api-log/0.log" Dec 10 12:13:43 crc kubenswrapper[4682]: I1210 12:13:43.003283 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_393918ae-0996-472c-9f98-1862109d9f54/cinder-api/0.log" Dec 10 12:13:43 crc kubenswrapper[4682]: I1210 12:13:43.133401 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_9860c609-4b4e-4bdd-a72c-6760a86226b7/cinder-scheduler/0.log" Dec 10 12:13:43 crc kubenswrapper[4682]: I1210 12:13:43.255747 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_9860c609-4b4e-4bdd-a72c-6760a86226b7/probe/0.log" Dec 10 12:13:43 crc kubenswrapper[4682]: I1210 12:13:43.320185 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-api-0_c414c980-13a0-4869-b74e-f9352e92e527/cloudkitty-api/0.log" Dec 10 12:13:43 crc kubenswrapper[4682]: I1210 12:13:43.383187 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-api-0_c414c980-13a0-4869-b74e-f9352e92e527/cloudkitty-api-log/0.log" Dec 10 12:13:43 crc kubenswrapper[4682]: I1210 12:13:43.560525 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-compactor-0_c3952c83-e815-459a-bcef-7ab66596b7d2/loki-compactor/0.log" Dec 10 12:13:43 crc kubenswrapper[4682]: I1210 12:13:43.937916 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-distributor-664b687b54-w4wxz_74c8133d-aa41-4891-8a66-fafa28cfd141/loki-distributor/0.log" Dec 10 12:13:44 crc kubenswrapper[4682]: I1210 12:13:44.016738 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-gateway-bc75944f-dm6gz_613faa0b-14df-452d-820e-1d3e589b183c/gateway/0.log" Dec 10 12:13:44 crc kubenswrapper[4682]: I1210 12:13:44.159415 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-gateway-bc75944f-m4qwq_180ae48d-ecb1-4485-b26b-ebaed9cf17e9/gateway/0.log" Dec 10 12:13:44 crc kubenswrapper[4682]: I1210 12:13:44.322659 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-index-gateway-0_4e20f0f2-e92b-4915-a6c8-cff3c50773fc/loki-index-gateway/0.log" Dec 10 12:13:44 crc kubenswrapper[4682]: I1210 12:13:44.549490 4682 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_cloudkitty-lokistack-ingester-0_ea1f94a0-5b00-4aac-85ae-f7af9df196b6/loki-ingester/0.log" Dec 10 12:13:44 crc kubenswrapper[4682]: I1210 12:13:44.651632 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-querier-5467947bf7-wwt8c_6e286958-b529-4f19-b8e3-164e6fe16e70/loki-querier/0.log" Dec 10 12:13:44 crc kubenswrapper[4682]: I1210 12:13:44.778489 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-lokistack-query-frontend-7c8cd744d9-5wfrz_31331344-4f2b-497d-9683-ea3e235bf0df/loki-query-frontend/0.log" Dec 10 12:13:45 crc kubenswrapper[4682]: I1210 12:13:45.048453 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5475ccd585-x8798_33cd4736-a475-41db-acb5-28015f2cf6a0/init/0.log" Dec 10 12:13:45 crc kubenswrapper[4682]: I1210 12:13:45.240462 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5475ccd585-x8798_33cd4736-a475-41db-acb5-28015f2cf6a0/init/0.log" Dec 10 12:13:45 crc kubenswrapper[4682]: I1210 12:13:45.247749 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5475ccd585-x8798_33cd4736-a475-41db-acb5-28015f2cf6a0/dnsmasq-dns/0.log" Dec 10 12:13:45 crc kubenswrapper[4682]: I1210 12:13:45.948748 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-49h6v_df88b6db-13a9-4d76-a9da-e259ef1f79a2/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:13:45 crc kubenswrapper[4682]: I1210 12:13:45.949191 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-89qdd_8dba7fc9-e8db-4ad6-b95c-ec9c2f3fe330/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:13:46 crc kubenswrapper[4682]: I1210 12:13:46.202441 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-rqc4w_11a38c1a-3a98-4c77-82ff-caf76c15fefc/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:13:46 crc kubenswrapper[4682]: I1210 12:13:46.255124 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-hlrmm_09844e48-f7bc-4c51-9dfa-dcc6daafb27f/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:13:46 crc kubenswrapper[4682]: I1210 12:13:46.472292 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-sk7rb_29311a90-82aa-4b3f-a171-f7d45d0b9dc1/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:13:46 crc kubenswrapper[4682]: I1210 12:13:46.484597 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-smtgq_8d02d5aa-758d-49b4-aa9e-77062c9af129/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:13:46 crc kubenswrapper[4682]: I1210 12:13:46.693226 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-zlcgn_bdc567ce-9075-470e-867a-ffd15f55c152/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:13:46 crc kubenswrapper[4682]: I1210 12:13:46.808011 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_13896178-eabd-4d1a-ad7c-8763c9b4f396/glance-httpd/0.log" Dec 10 12:13:46 crc kubenswrapper[4682]: 
I1210 12:13:46.939831 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_13896178-eabd-4d1a-ad7c-8763c9b4f396/glance-log/0.log" Dec 10 12:13:46 crc kubenswrapper[4682]: I1210 12:13:46.999005 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_38946e9d-e072-4758-8afb-dbdafdec204d/glance-httpd/0.log" Dec 10 12:13:47 crc kubenswrapper[4682]: I1210 12:13:47.095559 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_38946e9d-e072-4758-8afb-dbdafdec204d/glance-log/0.log" Dec 10 12:13:47 crc kubenswrapper[4682]: I1210 12:13:47.741337 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29422801-lnk8m_da98ce99-0e03-465a-9d86-0e3cb7fbcb59/keystone-cron/0.log" Dec 10 12:13:47 crc kubenswrapper[4682]: I1210 12:13:47.924413 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-548d5df8d4-8fcdl_66f5310d-7fea-4c01-8fe5-fe6ec16b3c68/keystone-api/0.log" Dec 10 12:13:47 crc kubenswrapper[4682]: I1210 12:13:47.964369 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_d282a8a0-6a52-4e7c-8fd1-0518ee8c4a7a/kube-state-metrics/0.log" Dec 10 12:13:48 crc kubenswrapper[4682]: I1210 12:13:48.248951 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7f7fc58469-rvhd4_ec3a169e-4679-409e-a778-f88b4972abf8/neutron-api/0.log" Dec 10 12:13:48 crc kubenswrapper[4682]: I1210 12:13:48.281575 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7f7fc58469-rvhd4_ec3a169e-4679-409e-a778-f88b4972abf8/neutron-httpd/0.log" Dec 10 12:13:48 crc kubenswrapper[4682]: I1210 12:13:48.670748 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_6014f5d3-c141-4ace-b793-2fa5aaa2c856/nova-api-log/0.log" Dec 10 12:13:48 crc kubenswrapper[4682]: I1210 12:13:48.905967 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cloudkitty-proc-0_dd5568e0-970f-4053-a407-8cd3070630b8/cloudkitty-proc/0.log" Dec 10 12:13:48 crc kubenswrapper[4682]: I1210 12:13:48.976951 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_6bf01d55-c09a-4228-8a66-40d1a9f12e0d/nova-cell0-conductor-conductor/0.log" Dec 10 12:13:49 crc kubenswrapper[4682]: I1210 12:13:49.025672 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_6014f5d3-c141-4ace-b793-2fa5aaa2c856/nova-api-api/0.log" Dec 10 12:13:49 crc kubenswrapper[4682]: I1210 12:13:49.239312 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_adedb4ee-2f85-464c-8a00-83a86ec2ad28/nova-cell1-conductor-conductor/0.log" Dec 10 12:13:49 crc kubenswrapper[4682]: I1210 12:13:49.322256 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_160b22a6-2d74-4e00-ac9c-1c12f3af4190/nova-cell1-novncproxy-novncproxy/0.log" Dec 10 12:13:49 crc kubenswrapper[4682]: E1210 12:13:49.382674 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:13:49 crc kubenswrapper[4682]: E1210 12:13:49.382743 4682 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:13:49 crc kubenswrapper[4682]: I1210 12:13:49.623406 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_bbfbd576-92c3-44d4-bdcf-8e17e0c65946/nova-metadata-log/0.log" Dec 10 12:13:49 crc kubenswrapper[4682]: I1210 12:13:49.730686 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_daab815f-ce3f-44be-8fbf-5a75b4379ccf/nova-scheduler-scheduler/0.log" Dec 10 12:13:49 crc kubenswrapper[4682]: I1210 12:13:49.866017 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_d9f85710-54c3-4f30-88f6-bb97f9a200e8/mysql-bootstrap/0.log" Dec 10 12:13:50 crc kubenswrapper[4682]: I1210 12:13:50.130420 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_d9f85710-54c3-4f30-88f6-bb97f9a200e8/mysql-bootstrap/0.log" Dec 10 12:13:50 crc kubenswrapper[4682]: I1210 12:13:50.163294 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_d9f85710-54c3-4f30-88f6-bb97f9a200e8/galera/0.log" Dec 10 12:13:50 crc kubenswrapper[4682]: I1210 12:13:50.343568 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1b6b3db6-e7bd-4c87-a35a-1f398c40436e/mysql-bootstrap/0.log" Dec 10 12:13:50 crc kubenswrapper[4682]: I1210 12:13:50.529519 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1b6b3db6-e7bd-4c87-a35a-1f398c40436e/galera/0.log" Dec 10 12:13:50 crc kubenswrapper[4682]: I1210 12:13:50.551757 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1b6b3db6-e7bd-4c87-a35a-1f398c40436e/mysql-bootstrap/0.log" Dec 10 12:13:50 crc kubenswrapper[4682]: I1210 12:13:50.724805 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_6073de7e-e347-4fb0-b607-21aaf92384b1/openstackclient/0.log" Dec 10 12:13:50 crc kubenswrapper[4682]: I1210 12:13:50.864494 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-w9qrd_26b1eabc-8b9f-4f9d-99ba-8c79f047e55e/openstack-network-exporter/0.log" Dec 10 12:13:51 crc kubenswrapper[4682]: I1210 12:13:51.010964 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-4b2ch_8ee7ede4-07ea-4b15-88e7-15477c99d5ab/ovsdb-server-init/0.log" Dec 10 12:13:51 crc kubenswrapper[4682]: I1210 12:13:51.306427 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_bbfbd576-92c3-44d4-bdcf-8e17e0c65946/nova-metadata-metadata/0.log" Dec 10 12:13:51 crc kubenswrapper[4682]: I1210 12:13:51.466886 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-4b2ch_8ee7ede4-07ea-4b15-88e7-15477c99d5ab/ovsdb-server/0.log" Dec 10 12:13:51 crc kubenswrapper[4682]: I1210 12:13:51.504269 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-4b2ch_8ee7ede4-07ea-4b15-88e7-15477c99d5ab/ovsdb-server-init/0.log" Dec 10 12:13:51 crc kubenswrapper[4682]: I1210 12:13:51.516041 4682 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-controller-ovs-4b2ch_8ee7ede4-07ea-4b15-88e7-15477c99d5ab/ovs-vswitchd/0.log" Dec 10 12:13:51 crc kubenswrapper[4682]: I1210 12:13:51.692571 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-w7jxw_df9d7d76-fa02-41c5-b652-ea9b7b00bd00/ovn-controller/0.log" Dec 10 12:13:51 crc kubenswrapper[4682]: I1210 12:13:51.771645 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_88e4bd3e-e940-489b-9d88-d40fd96bf0cd/openstack-network-exporter/0.log" Dec 10 12:13:51 crc kubenswrapper[4682]: I1210 12:13:51.892642 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_88e4bd3e-e940-489b-9d88-d40fd96bf0cd/ovn-northd/0.log" Dec 10 12:13:52 crc kubenswrapper[4682]: I1210 12:13:52.041290 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_97a3c791-f1b7-4665-ae8b-fa87d1ee73e1/openstack-network-exporter/0.log" Dec 10 12:13:52 crc kubenswrapper[4682]: I1210 12:13:52.042030 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_97a3c791-f1b7-4665-ae8b-fa87d1ee73e1/ovsdbserver-nb/0.log" Dec 10 12:13:52 crc kubenswrapper[4682]: I1210 12:13:52.297384 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_13b6a06f-420a-420d-8a7c-5a80d312ec79/openstack-network-exporter/0.log" Dec 10 12:13:52 crc kubenswrapper[4682]: I1210 12:13:52.308018 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_13b6a06f-420a-420d-8a7c-5a80d312ec79/ovsdbserver-sb/0.log" Dec 10 12:13:52 crc kubenswrapper[4682]: I1210 12:13:52.450394 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6548f86b64-snz6f_846c1791-e576-402b-b8f1-2222c7dd6c4b/placement-api/0.log" Dec 10 12:13:52 crc kubenswrapper[4682]: I1210 12:13:52.557465 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6548f86b64-snz6f_846c1791-e576-402b-b8f1-2222c7dd6c4b/placement-log/0.log" Dec 10 12:13:52 crc kubenswrapper[4682]: I1210 12:13:52.733163 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_f598ce2d-df0a-4477-8c89-126cc5d3a5be/init-config-reloader/0.log" Dec 10 12:13:52 crc kubenswrapper[4682]: I1210 12:13:52.944197 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_f598ce2d-df0a-4477-8c89-126cc5d3a5be/init-config-reloader/0.log" Dec 10 12:13:52 crc kubenswrapper[4682]: I1210 12:13:52.952001 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_f598ce2d-df0a-4477-8c89-126cc5d3a5be/config-reloader/0.log" Dec 10 12:13:52 crc kubenswrapper[4682]: I1210 12:13:52.954253 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_f598ce2d-df0a-4477-8c89-126cc5d3a5be/prometheus/0.log" Dec 10 12:13:53 crc kubenswrapper[4682]: I1210 12:13:53.002793 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_f598ce2d-df0a-4477-8c89-126cc5d3a5be/thanos-sidecar/0.log" Dec 10 12:13:53 crc kubenswrapper[4682]: I1210 12:13:53.279346 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b2bb3f39-3fa9-42c1-abea-06fd2630a819/setup-container/0.log" Dec 10 12:13:53 crc kubenswrapper[4682]: I1210 12:13:53.381647 4682 scope.go:117] "RemoveContainer" 
containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:13:53 crc kubenswrapper[4682]: E1210 12:13:53.381990 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:13:53 crc kubenswrapper[4682]: I1210 12:13:53.476567 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b2bb3f39-3fa9-42c1-abea-06fd2630a819/setup-container/0.log" Dec 10 12:13:53 crc kubenswrapper[4682]: I1210 12:13:53.508730 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_ce19556c-31cc-4e0a-b092-c5cfb2cf815a/setup-container/0.log" Dec 10 12:13:53 crc kubenswrapper[4682]: I1210 12:13:53.522448 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b2bb3f39-3fa9-42c1-abea-06fd2630a819/rabbitmq/0.log" Dec 10 12:13:53 crc kubenswrapper[4682]: I1210 12:13:53.884071 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_ce19556c-31cc-4e0a-b092-c5cfb2cf815a/rabbitmq/0.log" Dec 10 12:13:53 crc kubenswrapper[4682]: I1210 12:13:53.901962 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-dhhsr_667432f0-bad4-4a31-9f30-29daa0e52f73/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:13:53 crc kubenswrapper[4682]: I1210 12:13:53.920223 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_ce19556c-31cc-4e0a-b092-c5cfb2cf815a/setup-container/0.log" Dec 10 12:13:54 crc kubenswrapper[4682]: I1210 12:13:54.174765 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-2wsln_435fb604-dad7-4d75-bb61-2e4ccf57d2b3/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 12:13:54 crc kubenswrapper[4682]: I1210 12:13:54.426165 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7475fff587-94bkc_613d10c2-81be-4ff7-8f40-528d35c931e0/proxy-httpd/0.log" Dec 10 12:13:54 crc kubenswrapper[4682]: I1210 12:13:54.438987 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-n9b92_4c235968-0ec4-4c4f-98c4-6b19fa58e826/swift-ring-rebalance/0.log" Dec 10 12:13:54 crc kubenswrapper[4682]: I1210 12:13:54.468652 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7475fff587-94bkc_613d10c2-81be-4ff7-8f40-528d35c931e0/proxy-server/0.log" Dec 10 12:13:54 crc kubenswrapper[4682]: I1210 12:13:54.697177 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/account-reaper/0.log" Dec 10 12:13:54 crc kubenswrapper[4682]: I1210 12:13:54.718665 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/account-auditor/0.log" Dec 10 12:13:54 crc kubenswrapper[4682]: I1210 12:13:54.762777 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/account-replicator/0.log" Dec 10 12:13:54 crc kubenswrapper[4682]: 
I1210 12:13:54.921885 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/account-server/0.log" Dec 10 12:13:54 crc kubenswrapper[4682]: I1210 12:13:54.953078 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/container-auditor/0.log" Dec 10 12:13:54 crc kubenswrapper[4682]: I1210 12:13:54.971736 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/container-replicator/0.log" Dec 10 12:13:55 crc kubenswrapper[4682]: I1210 12:13:55.016609 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/container-server/0.log" Dec 10 12:13:55 crc kubenswrapper[4682]: I1210 12:13:55.181068 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/container-updater/0.log" Dec 10 12:13:55 crc kubenswrapper[4682]: I1210 12:13:55.207366 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/object-expirer/0.log" Dec 10 12:13:55 crc kubenswrapper[4682]: I1210 12:13:55.233063 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/object-auditor/0.log" Dec 10 12:13:55 crc kubenswrapper[4682]: I1210 12:13:55.268448 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/object-replicator/0.log" Dec 10 12:13:56 crc kubenswrapper[4682]: I1210 12:13:56.093286 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/rsync/0.log" Dec 10 12:13:56 crc kubenswrapper[4682]: I1210 12:13:56.111770 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/object-updater/0.log" Dec 10 12:13:56 crc kubenswrapper[4682]: I1210 12:13:56.137436 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/swift-recon-cron/0.log" Dec 10 12:13:56 crc kubenswrapper[4682]: I1210 12:13:56.167849 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_b7a82b72-0262-4a74-becf-36ead02cb92c/object-server/0.log" Dec 10 12:14:01 crc kubenswrapper[4682]: I1210 12:14:01.307865 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_63a61a31-230d-455f-b27a-87760ae46c25/memcached/0.log" Dec 10 12:14:01 crc kubenswrapper[4682]: E1210 12:14:01.383201 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:14:03 crc kubenswrapper[4682]: E1210 12:14:03.382179 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:14:04 crc 
kubenswrapper[4682]: I1210 12:14:04.381358 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:14:04 crc kubenswrapper[4682]: E1210 12:14:04.381880 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:14:12 crc kubenswrapper[4682]: E1210 12:14:12.383936 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:14:14 crc kubenswrapper[4682]: E1210 12:14:14.385746 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:14:19 crc kubenswrapper[4682]: I1210 12:14:19.381646 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:14:19 crc kubenswrapper[4682]: E1210 12:14:19.383616 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:14:24 crc kubenswrapper[4682]: E1210 12:14:24.384089 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:14:24 crc kubenswrapper[4682]: I1210 12:14:24.521249 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb_e903d396-7f4d-415c-8c7a-802cf7937946/util/0.log" Dec 10 12:14:24 crc kubenswrapper[4682]: I1210 12:14:24.741059 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb_e903d396-7f4d-415c-8c7a-802cf7937946/util/0.log" Dec 10 12:14:24 crc kubenswrapper[4682]: I1210 12:14:24.742264 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb_e903d396-7f4d-415c-8c7a-802cf7937946/pull/0.log" Dec 10 12:14:24 crc kubenswrapper[4682]: I1210 12:14:24.745782 4682 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb_e903d396-7f4d-415c-8c7a-802cf7937946/pull/0.log" Dec 10 12:14:24 crc kubenswrapper[4682]: I1210 12:14:24.894067 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb_e903d396-7f4d-415c-8c7a-802cf7937946/util/0.log" Dec 10 12:14:24 crc kubenswrapper[4682]: I1210 12:14:24.931131 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb_e903d396-7f4d-415c-8c7a-802cf7937946/extract/0.log" Dec 10 12:14:24 crc kubenswrapper[4682]: I1210 12:14:24.931577 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_0d3e900286c6ddceb3920f95bb3713bec0f74f6fff2c5179e680d841e2vnksb_e903d396-7f4d-415c-8c7a-802cf7937946/pull/0.log" Dec 10 12:14:25 crc kubenswrapper[4682]: I1210 12:14:25.067726 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-6lth6_4985e1e4-e9fa-406a-a744-45d9e9dc8135/kube-rbac-proxy/0.log" Dec 10 12:14:25 crc kubenswrapper[4682]: I1210 12:14:25.157340 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-6lth6_4985e1e4-e9fa-406a-a744-45d9e9dc8135/manager/0.log" Dec 10 12:14:25 crc kubenswrapper[4682]: I1210 12:14:25.169554 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-j9rxl_41b81f6b-1509-4330-b9b7-8692c065e8d0/kube-rbac-proxy/0.log" Dec 10 12:14:25 crc kubenswrapper[4682]: I1210 12:14:25.267990 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-j9rxl_41b81f6b-1509-4330-b9b7-8692c065e8d0/manager/0.log" Dec 10 12:14:25 crc kubenswrapper[4682]: I1210 12:14:25.400949 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-ldjzz_ecddf494-21c9-4fe4-9431-a61d9bc6ba0d/kube-rbac-proxy/0.log" Dec 10 12:14:25 crc kubenswrapper[4682]: I1210 12:14:25.411208 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-ldjzz_ecddf494-21c9-4fe4-9431-a61d9bc6ba0d/manager/0.log" Dec 10 12:14:25 crc kubenswrapper[4682]: I1210 12:14:25.562415 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-5697bb5779-b28jt_35d24c54-906b-406e-b03e-9fe2008fbb10/kube-rbac-proxy/0.log" Dec 10 12:14:25 crc kubenswrapper[4682]: I1210 12:14:25.638386 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-26vbl_539df8ed-9553-4ce0-be01-36055d2ab100/kube-rbac-proxy/0.log" Dec 10 12:14:25 crc kubenswrapper[4682]: I1210 12:14:25.642650 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-5697bb5779-b28jt_35d24c54-906b-406e-b03e-9fe2008fbb10/manager/0.log" Dec 10 12:14:25 crc kubenswrapper[4682]: I1210 12:14:25.757782 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-26vbl_539df8ed-9553-4ce0-be01-36055d2ab100/manager/0.log" Dec 10 12:14:25 crc kubenswrapper[4682]: I1210 12:14:25.830072 
4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-s7vjn_754f75d2-ce2a-4983-a82a-c62a2ffb2b04/kube-rbac-proxy/0.log" Dec 10 12:14:25 crc kubenswrapper[4682]: I1210 12:14:25.880247 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-s7vjn_754f75d2-ce2a-4983-a82a-c62a2ffb2b04/manager/0.log" Dec 10 12:14:26 crc kubenswrapper[4682]: I1210 12:14:26.027844 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-78d48bff9d-lffwd_84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f/kube-rbac-proxy/0.log" Dec 10 12:14:26 crc kubenswrapper[4682]: I1210 12:14:26.371957 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-78d48bff9d-lffwd_84b66333-d0e8-4b9f-8ec4-a15fdf96fa8f/manager/0.log" Dec 10 12:14:27 crc kubenswrapper[4682]: I1210 12:14:27.018838 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-cxbm8_00c2e072-614d-483b-a9da-86f271a88095/manager/0.log" Dec 10 12:14:27 crc kubenswrapper[4682]: I1210 12:14:27.023796 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-cxbm8_00c2e072-614d-483b-a9da-86f271a88095/kube-rbac-proxy/0.log" Dec 10 12:14:27 crc kubenswrapper[4682]: I1210 12:14:27.043040 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-mkfhq_79c97552-a229-4d38-ac96-79c2ef3303bf/kube-rbac-proxy/0.log" Dec 10 12:14:27 crc kubenswrapper[4682]: I1210 12:14:27.187618 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5b5fd79c9c-tv4q2_c69a769f-919b-4cf6-9957-a4cdc2a8f8d7/kube-rbac-proxy/0.log" Dec 10 12:14:27 crc kubenswrapper[4682]: I1210 12:14:27.416485 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-5jw7k_78994f55-53cc-46ce-a67f-8bcde14796f4/kube-rbac-proxy/0.log" Dec 10 12:14:27 crc kubenswrapper[4682]: I1210 12:14:27.531361 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5b5fd79c9c-tv4q2_c69a769f-919b-4cf6-9957-a4cdc2a8f8d7/manager/0.log" Dec 10 12:14:27 crc kubenswrapper[4682]: I1210 12:14:27.592663 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-5jw7k_78994f55-53cc-46ce-a67f-8bcde14796f4/manager/0.log" Dec 10 12:14:27 crc kubenswrapper[4682]: I1210 12:14:27.599731 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-mkfhq_79c97552-a229-4d38-ac96-79c2ef3303bf/manager/0.log" Dec 10 12:14:27 crc kubenswrapper[4682]: I1210 12:14:27.746052 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-mnl7f_c3051489-ad76-489d-b143-a913219881da/kube-rbac-proxy/0.log" Dec 10 12:14:27 crc kubenswrapper[4682]: I1210 12:14:27.816379 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-mnl7f_c3051489-ad76-489d-b143-a913219881da/manager/0.log" Dec 10 12:14:27 crc kubenswrapper[4682]: 
I1210 12:14:27.847887 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-9rc5v_0f6f4969-902f-44e4-a29e-fcb24ce0d7e4/kube-rbac-proxy/0.log" Dec 10 12:14:28 crc kubenswrapper[4682]: I1210 12:14:28.026410 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-lddjn_3224e18d-9f3d-4c9c-abb9-eed4fa24989c/kube-rbac-proxy/0.log" Dec 10 12:14:28 crc kubenswrapper[4682]: I1210 12:14:28.078606 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-9rc5v_0f6f4969-902f-44e4-a29e-fcb24ce0d7e4/manager/0.log" Dec 10 12:14:28 crc kubenswrapper[4682]: I1210 12:14:28.082977 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-lddjn_3224e18d-9f3d-4c9c-abb9-eed4fa24989c/manager/0.log" Dec 10 12:14:28 crc kubenswrapper[4682]: I1210 12:14:28.175984 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-84b575879fdgsbh_a4641319-ef96-4ffb-ac2e-a35154984ba8/kube-rbac-proxy/0.log" Dec 10 12:14:28 crc kubenswrapper[4682]: I1210 12:14:28.213627 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-84b575879fdgsbh_a4641319-ef96-4ffb-ac2e-a35154984ba8/manager/0.log" Dec 10 12:14:28 crc kubenswrapper[4682]: E1210 12:14:28.384419 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:14:29 crc kubenswrapper[4682]: I1210 12:14:29.302588 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-7b77d4dbbf-7lq6p_c7b3146b-cbe2-443c-b721-060df70df8ed/operator/0.log" Dec 10 12:14:29 crc kubenswrapper[4682]: I1210 12:14:29.405799 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-fg9v7_a0ccaebf-eedd-4bb2-927c-6d59100df2b3/registry-server/0.log" Dec 10 12:14:29 crc kubenswrapper[4682]: I1210 12:14:29.441982 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-5k89m_091fd04a-949b-4f31-8c04-80402b84ac36/kube-rbac-proxy/0.log" Dec 10 12:14:29 crc kubenswrapper[4682]: I1210 12:14:29.558985 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-5k89m_091fd04a-949b-4f31-8c04-80402b84ac36/manager/0.log" Dec 10 12:14:29 crc kubenswrapper[4682]: I1210 12:14:29.682625 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-lttng_0d277084-bc96-4bcb-a090-76ef7e2f385e/manager/0.log" Dec 10 12:14:29 crc kubenswrapper[4682]: I1210 12:14:29.761427 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-lttng_0d277084-bc96-4bcb-a090-76ef7e2f385e/kube-rbac-proxy/0.log" Dec 10 12:14:30 crc kubenswrapper[4682]: I1210 12:14:30.029112 4682 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-76vm7_b92a5136-09a9-49c1-ad89-bf46bccb9d45/operator/0.log" Dec 10 12:14:30 crc kubenswrapper[4682]: I1210 12:14:30.039388 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-v9vrr_028aa123-014b-4836-a8d8-e0acafea568f/kube-rbac-proxy/0.log" Dec 10 12:14:30 crc kubenswrapper[4682]: I1210 12:14:30.140771 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-v9vrr_028aa123-014b-4836-a8d8-e0acafea568f/manager/0.log" Dec 10 12:14:30 crc kubenswrapper[4682]: I1210 12:14:30.281246 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-678c445b7b-gz66r_31129f99-dd83-4b51-a741-5629f1f825fb/manager/0.log" Dec 10 12:14:30 crc kubenswrapper[4682]: I1210 12:14:30.343000 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-54d54d59bc-cjf8w_c2238ca5-7b77-471b-a743-75e076a61ce1/kube-rbac-proxy/0.log" Dec 10 12:14:30 crc kubenswrapper[4682]: I1210 12:14:30.388748 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:14:30 crc kubenswrapper[4682]: E1210 12:14:30.389118 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:14:30 crc kubenswrapper[4682]: I1210 12:14:30.471488 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-fz6ds_8f9fd772-c4a8-48d1-8294-c0572ad44506/manager/0.log" Dec 10 12:14:30 crc kubenswrapper[4682]: I1210 12:14:30.493203 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-fz6ds_8f9fd772-c4a8-48d1-8294-c0572ad44506/kube-rbac-proxy/0.log" Dec 10 12:14:30 crc kubenswrapper[4682]: I1210 12:14:30.659531 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-667bd8d554-5x7z6_f76d2d54-5cd6-4e5c-b719-92117a1e6cb9/manager/0.log" Dec 10 12:14:30 crc kubenswrapper[4682]: I1210 12:14:30.695397 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-667bd8d554-5x7z6_f76d2d54-5cd6-4e5c-b719-92117a1e6cb9/kube-rbac-proxy/0.log" Dec 10 12:14:30 crc kubenswrapper[4682]: I1210 12:14:30.800896 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-54d54d59bc-cjf8w_c2238ca5-7b77-471b-a743-75e076a61ce1/manager/0.log" Dec 10 12:14:39 crc kubenswrapper[4682]: E1210 12:14:39.383409 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:14:39 crc 
kubenswrapper[4682]: E1210 12:14:39.383538 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:14:45 crc kubenswrapper[4682]: I1210 12:14:45.381614 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:14:45 crc kubenswrapper[4682]: E1210 12:14:45.382412 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:14:50 crc kubenswrapper[4682]: I1210 12:14:50.193838 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-9gwb2_fb0db14b-539a-489f-baea-92c499d99906/control-plane-machine-set-operator/0.log" Dec 10 12:14:50 crc kubenswrapper[4682]: I1210 12:14:50.321037 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-dqndv_8696312f-d81d-442b-b80c-6938db27e66b/kube-rbac-proxy/0.log" Dec 10 12:14:50 crc kubenswrapper[4682]: I1210 12:14:50.396481 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-dqndv_8696312f-d81d-442b-b80c-6938db27e66b/machine-api-operator/0.log" Dec 10 12:14:51 crc kubenswrapper[4682]: E1210 12:14:51.384057 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:14:54 crc kubenswrapper[4682]: E1210 12:14:54.384057 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:14:58 crc kubenswrapper[4682]: I1210 12:14:58.381937 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:14:58 crc kubenswrapper[4682]: E1210 12:14:58.383198 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.186456 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg"] Dec 10 12:15:00 crc 
kubenswrapper[4682]: E1210 12:15:00.186982 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5551819-9e19-4ff1-adeb-54f3bad82f0f" containerName="container-00" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.186999 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5551819-9e19-4ff1-adeb-54f3bad82f0f" containerName="container-00" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.187210 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5551819-9e19-4ff1-adeb-54f3bad82f0f" containerName="container-00" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.187976 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.193515 4682 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.193522 4682 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.205835 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg"] Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.371320 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2764964e-6bf4-4413-adb3-8a4f95d42809-config-volume\") pod \"collect-profiles-29422815-pqbbg\" (UID: \"2764964e-6bf4-4413-adb3-8a4f95d42809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.371502 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmtgh\" (UniqueName: \"kubernetes.io/projected/2764964e-6bf4-4413-adb3-8a4f95d42809-kube-api-access-nmtgh\") pod \"collect-profiles-29422815-pqbbg\" (UID: \"2764964e-6bf4-4413-adb3-8a4f95d42809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.371566 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2764964e-6bf4-4413-adb3-8a4f95d42809-secret-volume\") pod \"collect-profiles-29422815-pqbbg\" (UID: \"2764964e-6bf4-4413-adb3-8a4f95d42809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.472998 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmtgh\" (UniqueName: \"kubernetes.io/projected/2764964e-6bf4-4413-adb3-8a4f95d42809-kube-api-access-nmtgh\") pod \"collect-profiles-29422815-pqbbg\" (UID: \"2764964e-6bf4-4413-adb3-8a4f95d42809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.473086 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2764964e-6bf4-4413-adb3-8a4f95d42809-secret-volume\") pod \"collect-profiles-29422815-pqbbg\" (UID: \"2764964e-6bf4-4413-adb3-8a4f95d42809\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.473170 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2764964e-6bf4-4413-adb3-8a4f95d42809-config-volume\") pod \"collect-profiles-29422815-pqbbg\" (UID: \"2764964e-6bf4-4413-adb3-8a4f95d42809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.473954 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2764964e-6bf4-4413-adb3-8a4f95d42809-config-volume\") pod \"collect-profiles-29422815-pqbbg\" (UID: \"2764964e-6bf4-4413-adb3-8a4f95d42809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.479242 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2764964e-6bf4-4413-adb3-8a4f95d42809-secret-volume\") pod \"collect-profiles-29422815-pqbbg\" (UID: \"2764964e-6bf4-4413-adb3-8a4f95d42809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.492328 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmtgh\" (UniqueName: \"kubernetes.io/projected/2764964e-6bf4-4413-adb3-8a4f95d42809-kube-api-access-nmtgh\") pod \"collect-profiles-29422815-pqbbg\" (UID: \"2764964e-6bf4-4413-adb3-8a4f95d42809\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" Dec 10 12:15:00 crc kubenswrapper[4682]: I1210 12:15:00.525958 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" Dec 10 12:15:01 crc kubenswrapper[4682]: I1210 12:15:01.028990 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg"] Dec 10 12:15:01 crc kubenswrapper[4682]: I1210 12:15:01.709545 4682 generic.go:334] "Generic (PLEG): container finished" podID="2764964e-6bf4-4413-adb3-8a4f95d42809" containerID="e006decc188c4fdba63984fdc1f53ad1f9e2161a4e61fe388ec705cb308677fa" exitCode=0 Dec 10 12:15:01 crc kubenswrapper[4682]: I1210 12:15:01.709706 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" event={"ID":"2764964e-6bf4-4413-adb3-8a4f95d42809","Type":"ContainerDied","Data":"e006decc188c4fdba63984fdc1f53ad1f9e2161a4e61fe388ec705cb308677fa"} Dec 10 12:15:01 crc kubenswrapper[4682]: I1210 12:15:01.709874 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" event={"ID":"2764964e-6bf4-4413-adb3-8a4f95d42809","Type":"ContainerStarted","Data":"6f902303352fa430e7e4ffacd7d9be303ca465fec877878ae59c27cac33bc520"} Dec 10 12:15:03 crc kubenswrapper[4682]: I1210 12:15:03.109527 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" Dec 10 12:15:03 crc kubenswrapper[4682]: I1210 12:15:03.242436 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmtgh\" (UniqueName: \"kubernetes.io/projected/2764964e-6bf4-4413-adb3-8a4f95d42809-kube-api-access-nmtgh\") pod \"2764964e-6bf4-4413-adb3-8a4f95d42809\" (UID: \"2764964e-6bf4-4413-adb3-8a4f95d42809\") " Dec 10 12:15:03 crc kubenswrapper[4682]: I1210 12:15:03.242887 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2764964e-6bf4-4413-adb3-8a4f95d42809-config-volume\") pod \"2764964e-6bf4-4413-adb3-8a4f95d42809\" (UID: \"2764964e-6bf4-4413-adb3-8a4f95d42809\") " Dec 10 12:15:03 crc kubenswrapper[4682]: I1210 12:15:03.242931 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2764964e-6bf4-4413-adb3-8a4f95d42809-secret-volume\") pod \"2764964e-6bf4-4413-adb3-8a4f95d42809\" (UID: \"2764964e-6bf4-4413-adb3-8a4f95d42809\") " Dec 10 12:15:03 crc kubenswrapper[4682]: I1210 12:15:03.243253 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2764964e-6bf4-4413-adb3-8a4f95d42809-config-volume" (OuterVolumeSpecName: "config-volume") pod "2764964e-6bf4-4413-adb3-8a4f95d42809" (UID: "2764964e-6bf4-4413-adb3-8a4f95d42809"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 12:15:03 crc kubenswrapper[4682]: I1210 12:15:03.243759 4682 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2764964e-6bf4-4413-adb3-8a4f95d42809-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 12:15:03 crc kubenswrapper[4682]: E1210 12:15:03.383281 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:15:03 crc kubenswrapper[4682]: I1210 12:15:03.686605 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2764964e-6bf4-4413-adb3-8a4f95d42809-kube-api-access-nmtgh" (OuterVolumeSpecName: "kube-api-access-nmtgh") pod "2764964e-6bf4-4413-adb3-8a4f95d42809" (UID: "2764964e-6bf4-4413-adb3-8a4f95d42809"). InnerVolumeSpecName "kube-api-access-nmtgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:15:03 crc kubenswrapper[4682]: I1210 12:15:03.695619 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2764964e-6bf4-4413-adb3-8a4f95d42809-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2764964e-6bf4-4413-adb3-8a4f95d42809" (UID: "2764964e-6bf4-4413-adb3-8a4f95d42809"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 12:15:03 crc kubenswrapper[4682]: I1210 12:15:03.735213 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" event={"ID":"2764964e-6bf4-4413-adb3-8a4f95d42809","Type":"ContainerDied","Data":"6f902303352fa430e7e4ffacd7d9be303ca465fec877878ae59c27cac33bc520"} Dec 10 12:15:03 crc kubenswrapper[4682]: I1210 12:15:03.735256 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f902303352fa430e7e4ffacd7d9be303ca465fec877878ae59c27cac33bc520" Dec 10 12:15:03 crc kubenswrapper[4682]: I1210 12:15:03.735762 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422815-pqbbg" Dec 10 12:15:03 crc kubenswrapper[4682]: I1210 12:15:03.755342 4682 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2764964e-6bf4-4413-adb3-8a4f95d42809-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 12:15:03 crc kubenswrapper[4682]: I1210 12:15:03.755369 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmtgh\" (UniqueName: \"kubernetes.io/projected/2764964e-6bf4-4413-adb3-8a4f95d42809-kube-api-access-nmtgh\") on node \"crc\" DevicePath \"\"" Dec 10 12:15:04 crc kubenswrapper[4682]: I1210 12:15:04.199799 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d"] Dec 10 12:15:04 crc kubenswrapper[4682]: I1210 12:15:04.207328 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422770-rv25d"] Dec 10 12:15:04 crc kubenswrapper[4682]: I1210 12:15:04.395693 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3877c815-0840-4da9-a28b-6539f8c186a6" path="/var/lib/kubelet/pods/3877c815-0840-4da9-a28b-6539f8c186a6/volumes" Dec 10 12:15:04 crc kubenswrapper[4682]: I1210 12:15:04.885823 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-zsncd_34fe5718-bdcd-4e01-8d46-5033469ecee0/cert-manager-controller/0.log" Dec 10 12:15:05 crc kubenswrapper[4682]: I1210 12:15:05.030041 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-mqkpw_2ad8f556-5e94-447b-9ec3-cd5c29885e2a/cert-manager-cainjector/0.log" Dec 10 12:15:05 crc kubenswrapper[4682]: I1210 12:15:05.071797 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-q7f8s_e8f49724-e500-4735-9eaa-f28ab2fe7d34/cert-manager-webhook/0.log" Dec 10 12:15:09 crc kubenswrapper[4682]: I1210 12:15:09.381130 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:15:09 crc kubenswrapper[4682]: E1210 12:15:09.383005 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:15:09 crc kubenswrapper[4682]: I1210 12:15:09.790630 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" 
event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"500f35428be80884766a23672608a264bb1d830f5f6200fb1d53feade4c8bb3c"} Dec 10 12:15:17 crc kubenswrapper[4682]: E1210 12:15:17.382393 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:15:18 crc kubenswrapper[4682]: I1210 12:15:18.764814 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-qp2p9_20cd6a69-1431-4436-b960-a1910bb43824/nmstate-console-plugin/0.log" Dec 10 12:15:18 crc kubenswrapper[4682]: I1210 12:15:18.960649 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-bk4zp_daa4f7f7-a1b7-4580-a1bd-a23cc5c8805a/nmstate-handler/0.log" Dec 10 12:15:18 crc kubenswrapper[4682]: I1210 12:15:18.988368 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-zb5sb_8ff1d21d-ef0f-421b-bb86-264a367afea9/kube-rbac-proxy/0.log" Dec 10 12:15:19 crc kubenswrapper[4682]: I1210 12:15:19.027956 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-zb5sb_8ff1d21d-ef0f-421b-bb86-264a367afea9/nmstate-metrics/0.log" Dec 10 12:15:19 crc kubenswrapper[4682]: I1210 12:15:19.172521 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-gng7d_8045c75b-04d3-4ffc-a268-3bcce0b6a747/nmstate-operator/0.log" Dec 10 12:15:19 crc kubenswrapper[4682]: I1210 12:15:19.264013 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-vvtwc_17409d60-d1ec-49a2-8c40-a8786491d77b/nmstate-webhook/0.log" Dec 10 12:15:24 crc kubenswrapper[4682]: E1210 12:15:24.383319 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:15:30 crc kubenswrapper[4682]: I1210 12:15:30.767935 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-7fcbf8fdb4-m96c8_351331cd-a02a-4356-9143-325ba6a4c72a/kube-rbac-proxy/0.log" Dec 10 12:15:30 crc kubenswrapper[4682]: I1210 12:15:30.820293 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-7fcbf8fdb4-m96c8_351331cd-a02a-4356-9143-325ba6a4c72a/manager/0.log" Dec 10 12:15:31 crc kubenswrapper[4682]: E1210 12:15:31.384736 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:15:36 crc kubenswrapper[4682]: I1210 12:15:36.385082 4682 scope.go:117] "RemoveContainer" containerID="0d95e0e3da956db2990146c0229acf3a18a911823ff307bb9dd6e56b2ea44142" Dec 10 12:15:37 crc kubenswrapper[4682]: 
E1210 12:15:37.391904 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:15:43 crc kubenswrapper[4682]: E1210 12:15:43.383608 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:15:44 crc kubenswrapper[4682]: I1210 12:15:44.928928 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-vxx5m_511918ac-6534-424c-8dcc-6af79a689e3b/kube-rbac-proxy/0.log" Dec 10 12:15:45 crc kubenswrapper[4682]: I1210 12:15:45.073303 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-vxx5m_511918ac-6534-424c-8dcc-6af79a689e3b/controller/0.log" Dec 10 12:15:45 crc kubenswrapper[4682]: I1210 12:15:45.151208 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/cp-frr-files/0.log" Dec 10 12:15:45 crc kubenswrapper[4682]: I1210 12:15:45.440928 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/cp-frr-files/0.log" Dec 10 12:15:45 crc kubenswrapper[4682]: I1210 12:15:45.454245 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/cp-reloader/0.log" Dec 10 12:15:45 crc kubenswrapper[4682]: I1210 12:15:45.459989 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/cp-reloader/0.log" Dec 10 12:15:45 crc kubenswrapper[4682]: I1210 12:15:45.474868 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/cp-metrics/0.log" Dec 10 12:15:45 crc kubenswrapper[4682]: I1210 12:15:45.696702 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/cp-frr-files/0.log" Dec 10 12:15:45 crc kubenswrapper[4682]: I1210 12:15:45.716034 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/cp-metrics/0.log" Dec 10 12:15:45 crc kubenswrapper[4682]: I1210 12:15:45.716251 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/cp-reloader/0.log" Dec 10 12:15:45 crc kubenswrapper[4682]: I1210 12:15:45.733933 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/cp-metrics/0.log" Dec 10 12:15:45 crc kubenswrapper[4682]: I1210 12:15:45.922595 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/cp-frr-files/0.log" Dec 10 12:15:45 crc kubenswrapper[4682]: I1210 12:15:45.948406 4682 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/cp-reloader/0.log" Dec 10 12:15:45 crc kubenswrapper[4682]: I1210 12:15:45.967207 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/cp-metrics/0.log" Dec 10 12:15:45 crc kubenswrapper[4682]: I1210 12:15:45.991366 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/controller/0.log" Dec 10 12:15:46 crc kubenswrapper[4682]: I1210 12:15:46.440501 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/frr-metrics/0.log" Dec 10 12:15:46 crc kubenswrapper[4682]: I1210 12:15:46.445072 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/kube-rbac-proxy-frr/0.log" Dec 10 12:15:46 crc kubenswrapper[4682]: I1210 12:15:46.461209 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/kube-rbac-proxy/0.log" Dec 10 12:15:46 crc kubenswrapper[4682]: I1210 12:15:46.656020 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-m264b_9e2611bd-3314-4e57-9167-e2fbfa6fecf2/frr-k8s-webhook-server/0.log" Dec 10 12:15:46 crc kubenswrapper[4682]: I1210 12:15:46.671234 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/reloader/0.log" Dec 10 12:15:47 crc kubenswrapper[4682]: I1210 12:15:47.630653 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-59c7c5b449-wb9kw_0d34c412-0fb1-4dd3-9d93-66d805babdb3/manager/0.log" Dec 10 12:15:47 crc kubenswrapper[4682]: I1210 12:15:47.897890 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-l959x_d57c92a8-faaf-46ae-969f-db2ceefc22f0/frr/0.log" Dec 10 12:15:47 crc kubenswrapper[4682]: I1210 12:15:47.915842 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-6f885c4f9b-4fdwm_4c20b9d3-1c34-4d9a-8917-8933d9c376ce/webhook-server/0.log" Dec 10 12:15:47 crc kubenswrapper[4682]: I1210 12:15:47.957362 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-lwpvn_85685af9-4c58-4bee-bf6b-abe9fb2626f9/kube-rbac-proxy/0.log" Dec 10 12:15:48 crc kubenswrapper[4682]: I1210 12:15:48.565318 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-lwpvn_85685af9-4c58-4bee-bf6b-abe9fb2626f9/speaker/0.log" Dec 10 12:15:52 crc kubenswrapper[4682]: E1210 12:15:52.383574 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:15:58 crc kubenswrapper[4682]: E1210 12:15:58.383022 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" 
podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:16:04 crc kubenswrapper[4682]: I1210 12:16:04.371571 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv_7601d99a-9766-47a0-931d-b42823276eeb/util/0.log" Dec 10 12:16:04 crc kubenswrapper[4682]: E1210 12:16:04.390093 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:16:04 crc kubenswrapper[4682]: I1210 12:16:04.553228 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv_7601d99a-9766-47a0-931d-b42823276eeb/util/0.log" Dec 10 12:16:04 crc kubenswrapper[4682]: I1210 12:16:04.649551 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv_7601d99a-9766-47a0-931d-b42823276eeb/pull/0.log" Dec 10 12:16:04 crc kubenswrapper[4682]: I1210 12:16:04.691128 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv_7601d99a-9766-47a0-931d-b42823276eeb/pull/0.log" Dec 10 12:16:04 crc kubenswrapper[4682]: I1210 12:16:04.880627 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv_7601d99a-9766-47a0-931d-b42823276eeb/extract/0.log" Dec 10 12:16:04 crc kubenswrapper[4682]: I1210 12:16:04.917547 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv_7601d99a-9766-47a0-931d-b42823276eeb/pull/0.log" Dec 10 12:16:04 crc kubenswrapper[4682]: I1210 12:16:04.919985 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f4s7dv_7601d99a-9766-47a0-931d-b42823276eeb/util/0.log" Dec 10 12:16:05 crc kubenswrapper[4682]: I1210 12:16:05.084976 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz_e373320b-0c25-4165-b27a-ff5b889dd9a9/util/0.log" Dec 10 12:16:05 crc kubenswrapper[4682]: I1210 12:16:05.270096 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz_e373320b-0c25-4165-b27a-ff5b889dd9a9/util/0.log" Dec 10 12:16:05 crc kubenswrapper[4682]: I1210 12:16:05.279687 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz_e373320b-0c25-4165-b27a-ff5b889dd9a9/pull/0.log" Dec 10 12:16:05 crc kubenswrapper[4682]: I1210 12:16:05.322867 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz_e373320b-0c25-4165-b27a-ff5b889dd9a9/pull/0.log" Dec 10 12:16:06 crc kubenswrapper[4682]: I1210 12:16:06.042987 4682 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz_e373320b-0c25-4165-b27a-ff5b889dd9a9/util/0.log" Dec 10 12:16:06 crc kubenswrapper[4682]: I1210 12:16:06.123780 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz_e373320b-0c25-4165-b27a-ff5b889dd9a9/pull/0.log" Dec 10 12:16:06 crc kubenswrapper[4682]: I1210 12:16:06.262159 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210nqfpz_e373320b-0c25-4165-b27a-ff5b889dd9a9/extract/0.log" Dec 10 12:16:06 crc kubenswrapper[4682]: I1210 12:16:06.454418 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd_e52fc6a0-640e-4c38-b90d-93faeb8b8b7b/util/0.log" Dec 10 12:16:06 crc kubenswrapper[4682]: I1210 12:16:06.622759 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd_e52fc6a0-640e-4c38-b90d-93faeb8b8b7b/util/0.log" Dec 10 12:16:06 crc kubenswrapper[4682]: I1210 12:16:06.648884 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd_e52fc6a0-640e-4c38-b90d-93faeb8b8b7b/pull/0.log" Dec 10 12:16:06 crc kubenswrapper[4682]: I1210 12:16:06.715603 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd_e52fc6a0-640e-4c38-b90d-93faeb8b8b7b/pull/0.log" Dec 10 12:16:06 crc kubenswrapper[4682]: I1210 12:16:06.861301 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd_e52fc6a0-640e-4c38-b90d-93faeb8b8b7b/util/0.log" Dec 10 12:16:06 crc kubenswrapper[4682]: I1210 12:16:06.865075 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd_e52fc6a0-640e-4c38-b90d-93faeb8b8b7b/pull/0.log" Dec 10 12:16:06 crc kubenswrapper[4682]: I1210 12:16:06.892078 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_7b5aa1f5b38b68c96e281700110eb6f32773ca4b2682978fa6f2ffb2c18ddcd_e52fc6a0-640e-4c38-b90d-93faeb8b8b7b/extract/0.log" Dec 10 12:16:07 crc kubenswrapper[4682]: I1210 12:16:07.045358 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q_8d258b61-c222-4a6a-9ca6-e73e7d1919b7/util/0.log" Dec 10 12:16:07 crc kubenswrapper[4682]: I1210 12:16:07.265276 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q_8d258b61-c222-4a6a-9ca6-e73e7d1919b7/util/0.log" Dec 10 12:16:07 crc kubenswrapper[4682]: I1210 12:16:07.266434 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q_8d258b61-c222-4a6a-9ca6-e73e7d1919b7/pull/0.log" Dec 10 12:16:07 crc kubenswrapper[4682]: I1210 12:16:07.285485 4682 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q_8d258b61-c222-4a6a-9ca6-e73e7d1919b7/pull/0.log" Dec 10 12:16:07 crc kubenswrapper[4682]: I1210 12:16:07.442338 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q_8d258b61-c222-4a6a-9ca6-e73e7d1919b7/util/0.log" Dec 10 12:16:07 crc kubenswrapper[4682]: I1210 12:16:07.483621 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q_8d258b61-c222-4a6a-9ca6-e73e7d1919b7/pull/0.log" Dec 10 12:16:07 crc kubenswrapper[4682]: I1210 12:16:07.515016 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83hp46q_8d258b61-c222-4a6a-9ca6-e73e7d1919b7/extract/0.log" Dec 10 12:16:07 crc kubenswrapper[4682]: I1210 12:16:07.707714 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5ztff_9e3e5b17-ecad-4090-911d-37d92a72377b/extract-utilities/0.log" Dec 10 12:16:07 crc kubenswrapper[4682]: I1210 12:16:07.890495 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5ztff_9e3e5b17-ecad-4090-911d-37d92a72377b/extract-utilities/0.log" Dec 10 12:16:07 crc kubenswrapper[4682]: I1210 12:16:07.925556 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5ztff_9e3e5b17-ecad-4090-911d-37d92a72377b/extract-content/0.log" Dec 10 12:16:07 crc kubenswrapper[4682]: I1210 12:16:07.943831 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5ztff_9e3e5b17-ecad-4090-911d-37d92a72377b/extract-content/0.log" Dec 10 12:16:08 crc kubenswrapper[4682]: I1210 12:16:08.135957 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5ztff_9e3e5b17-ecad-4090-911d-37d92a72377b/extract-content/0.log" Dec 10 12:16:08 crc kubenswrapper[4682]: I1210 12:16:08.149861 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5ztff_9e3e5b17-ecad-4090-911d-37d92a72377b/extract-utilities/0.log" Dec 10 12:16:08 crc kubenswrapper[4682]: I1210 12:16:08.220246 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qqtm6_646faf8c-7ee0-40f1-a240-18d7e8314632/extract-utilities/0.log" Dec 10 12:16:08 crc kubenswrapper[4682]: I1210 12:16:08.487803 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5ztff_9e3e5b17-ecad-4090-911d-37d92a72377b/registry-server/0.log" Dec 10 12:16:08 crc kubenswrapper[4682]: I1210 12:16:08.491679 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qqtm6_646faf8c-7ee0-40f1-a240-18d7e8314632/extract-utilities/0.log" Dec 10 12:16:08 crc kubenswrapper[4682]: I1210 12:16:08.507386 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qqtm6_646faf8c-7ee0-40f1-a240-18d7e8314632/extract-content/0.log" Dec 10 12:16:08 crc kubenswrapper[4682]: I1210 12:16:08.543126 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qqtm6_646faf8c-7ee0-40f1-a240-18d7e8314632/extract-content/0.log" Dec 10 12:16:08 
crc kubenswrapper[4682]: I1210 12:16:08.743729 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qqtm6_646faf8c-7ee0-40f1-a240-18d7e8314632/extract-utilities/0.log" Dec 10 12:16:08 crc kubenswrapper[4682]: I1210 12:16:08.754261 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qqtm6_646faf8c-7ee0-40f1-a240-18d7e8314632/extract-content/0.log" Dec 10 12:16:08 crc kubenswrapper[4682]: I1210 12:16:08.818927 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-pcvj2_27962a48-9d75-4437-bc45-9258a223ebbb/marketplace-operator/0.log" Dec 10 12:16:08 crc kubenswrapper[4682]: I1210 12:16:08.966666 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-4qz5q_25834669-f151-40f3-8e93-78092435a84e/extract-utilities/0.log" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.243047 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-4qz5q_25834669-f151-40f3-8e93-78092435a84e/extract-content/0.log" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.251140 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-4qz5q_25834669-f151-40f3-8e93-78092435a84e/extract-content/0.log" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.288688 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-4qz5q_25834669-f151-40f3-8e93-78092435a84e/extract-utilities/0.log" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.503208 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-4qz5q_25834669-f151-40f3-8e93-78092435a84e/extract-content/0.log" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.505386 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-4qz5q_25834669-f151-40f3-8e93-78092435a84e/extract-utilities/0.log" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.548833 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qqtm6_646faf8c-7ee0-40f1-a240-18d7e8314632/registry-server/0.log" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.762424 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-w85mw_1865d09a-4c60-4370-9cf4-378d20749b59/extract-utilities/0.log" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.804141 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-4qz5q_25834669-f151-40f3-8e93-78092435a84e/registry-server/0.log" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.961428 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2qhzg"] Dec 10 12:16:09 crc kubenswrapper[4682]: E1210 12:16:09.962019 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2764964e-6bf4-4413-adb3-8a4f95d42809" containerName="collect-profiles" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.962044 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="2764964e-6bf4-4413-adb3-8a4f95d42809" containerName="collect-profiles" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.962336 4682 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="2764964e-6bf4-4413-adb3-8a4f95d42809" containerName="collect-profiles" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.964328 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.972765 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-w85mw_1865d09a-4c60-4370-9cf4-378d20749b59/extract-content/0.log" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.973007 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-w85mw_1865d09a-4c60-4370-9cf4-378d20749b59/extract-utilities/0.log" Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.976772 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2qhzg"] Dec 10 12:16:09 crc kubenswrapper[4682]: I1210 12:16:09.980112 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-w85mw_1865d09a-4c60-4370-9cf4-378d20749b59/extract-content/0.log" Dec 10 12:16:10 crc kubenswrapper[4682]: I1210 12:16:10.056156 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsc5c\" (UniqueName: \"kubernetes.io/projected/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-kube-api-access-dsc5c\") pod \"certified-operators-2qhzg\" (UID: \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\") " pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:10 crc kubenswrapper[4682]: I1210 12:16:10.056250 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-catalog-content\") pod \"certified-operators-2qhzg\" (UID: \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\") " pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:10 crc kubenswrapper[4682]: I1210 12:16:10.056299 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-utilities\") pod \"certified-operators-2qhzg\" (UID: \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\") " pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:10 crc kubenswrapper[4682]: I1210 12:16:10.153376 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-w85mw_1865d09a-4c60-4370-9cf4-378d20749b59/extract-utilities/0.log" Dec 10 12:16:10 crc kubenswrapper[4682]: I1210 12:16:10.153414 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-w85mw_1865d09a-4c60-4370-9cf4-378d20749b59/extract-content/0.log" Dec 10 12:16:10 crc kubenswrapper[4682]: I1210 12:16:10.158413 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dsc5c\" (UniqueName: \"kubernetes.io/projected/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-kube-api-access-dsc5c\") pod \"certified-operators-2qhzg\" (UID: \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\") " pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:10 crc kubenswrapper[4682]: I1210 12:16:10.158705 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-catalog-content\") pod 
\"certified-operators-2qhzg\" (UID: \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\") " pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:10 crc kubenswrapper[4682]: I1210 12:16:10.158846 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-utilities\") pod \"certified-operators-2qhzg\" (UID: \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\") " pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:10 crc kubenswrapper[4682]: I1210 12:16:10.159340 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-utilities\") pod \"certified-operators-2qhzg\" (UID: \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\") " pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:10 crc kubenswrapper[4682]: I1210 12:16:10.159617 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-catalog-content\") pod \"certified-operators-2qhzg\" (UID: \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\") " pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:10 crc kubenswrapper[4682]: I1210 12:16:10.180023 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsc5c\" (UniqueName: \"kubernetes.io/projected/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-kube-api-access-dsc5c\") pod \"certified-operators-2qhzg\" (UID: \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\") " pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:10 crc kubenswrapper[4682]: I1210 12:16:10.287273 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:10 crc kubenswrapper[4682]: I1210 12:16:10.984343 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2qhzg"] Dec 10 12:16:11 crc kubenswrapper[4682]: I1210 12:16:11.010643 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-w85mw_1865d09a-4c60-4370-9cf4-378d20749b59/registry-server/0.log" Dec 10 12:16:11 crc kubenswrapper[4682]: E1210 12:16:11.383068 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:16:11 crc kubenswrapper[4682]: I1210 12:16:11.415430 4682 generic.go:334] "Generic (PLEG): container finished" podID="0f9b44cf-6e45-4921-a215-1fb8ea5e3778" containerID="d3627b497928a830ecabf26adcf2fb6047897c92df7eb70b8226fa1773ebbe50" exitCode=0 Dec 10 12:16:11 crc kubenswrapper[4682]: I1210 12:16:11.415522 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2qhzg" event={"ID":"0f9b44cf-6e45-4921-a215-1fb8ea5e3778","Type":"ContainerDied","Data":"d3627b497928a830ecabf26adcf2fb6047897c92df7eb70b8226fa1773ebbe50"} Dec 10 12:16:11 crc kubenswrapper[4682]: I1210 12:16:11.415551 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2qhzg" event={"ID":"0f9b44cf-6e45-4921-a215-1fb8ea5e3778","Type":"ContainerStarted","Data":"2c53cbdd61ffd22b334d6034da6b19666bf4c4953aa38b8c991d520db28c4a80"} Dec 10 12:16:12 crc kubenswrapper[4682]: I1210 12:16:12.548350 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rkgpr"] Dec 10 12:16:12 crc kubenswrapper[4682]: I1210 12:16:12.551274 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:12 crc kubenswrapper[4682]: I1210 12:16:12.572287 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rkgpr"] Dec 10 12:16:12 crc kubenswrapper[4682]: I1210 12:16:12.735370 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4877511-494d-453e-8d87-ed2198ca5d4f-utilities\") pod \"redhat-marketplace-rkgpr\" (UID: \"c4877511-494d-453e-8d87-ed2198ca5d4f\") " pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:12 crc kubenswrapper[4682]: I1210 12:16:12.735591 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4877511-494d-453e-8d87-ed2198ca5d4f-catalog-content\") pod \"redhat-marketplace-rkgpr\" (UID: \"c4877511-494d-453e-8d87-ed2198ca5d4f\") " pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:12 crc kubenswrapper[4682]: I1210 12:16:12.735751 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hv6sn\" (UniqueName: \"kubernetes.io/projected/c4877511-494d-453e-8d87-ed2198ca5d4f-kube-api-access-hv6sn\") pod \"redhat-marketplace-rkgpr\" (UID: \"c4877511-494d-453e-8d87-ed2198ca5d4f\") " pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:12 crc kubenswrapper[4682]: I1210 12:16:12.837703 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4877511-494d-453e-8d87-ed2198ca5d4f-utilities\") pod \"redhat-marketplace-rkgpr\" (UID: \"c4877511-494d-453e-8d87-ed2198ca5d4f\") " pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:12 crc kubenswrapper[4682]: I1210 12:16:12.837810 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4877511-494d-453e-8d87-ed2198ca5d4f-catalog-content\") pod \"redhat-marketplace-rkgpr\" (UID: \"c4877511-494d-453e-8d87-ed2198ca5d4f\") " pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:12 crc kubenswrapper[4682]: I1210 12:16:12.837891 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hv6sn\" (UniqueName: \"kubernetes.io/projected/c4877511-494d-453e-8d87-ed2198ca5d4f-kube-api-access-hv6sn\") pod \"redhat-marketplace-rkgpr\" (UID: \"c4877511-494d-453e-8d87-ed2198ca5d4f\") " pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:12 crc kubenswrapper[4682]: I1210 12:16:12.838400 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4877511-494d-453e-8d87-ed2198ca5d4f-catalog-content\") pod \"redhat-marketplace-rkgpr\" (UID: \"c4877511-494d-453e-8d87-ed2198ca5d4f\") " pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:12 crc kubenswrapper[4682]: I1210 12:16:12.838424 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4877511-494d-453e-8d87-ed2198ca5d4f-utilities\") pod \"redhat-marketplace-rkgpr\" (UID: \"c4877511-494d-453e-8d87-ed2198ca5d4f\") " pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:12 crc kubenswrapper[4682]: I1210 12:16:12.865648 4682 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-hv6sn\" (UniqueName: \"kubernetes.io/projected/c4877511-494d-453e-8d87-ed2198ca5d4f-kube-api-access-hv6sn\") pod \"redhat-marketplace-rkgpr\" (UID: \"c4877511-494d-453e-8d87-ed2198ca5d4f\") " pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:12 crc kubenswrapper[4682]: I1210 12:16:12.871826 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:13 crc kubenswrapper[4682]: I1210 12:16:13.415089 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rkgpr"] Dec 10 12:16:13 crc kubenswrapper[4682]: I1210 12:16:13.436853 4682 generic.go:334] "Generic (PLEG): container finished" podID="0f9b44cf-6e45-4921-a215-1fb8ea5e3778" containerID="dcd50b154fe9994ba3f5ead8ac0f59a0fda2559179cd0396c030b30aaf1df5b2" exitCode=0 Dec 10 12:16:13 crc kubenswrapper[4682]: I1210 12:16:13.436923 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2qhzg" event={"ID":"0f9b44cf-6e45-4921-a215-1fb8ea5e3778","Type":"ContainerDied","Data":"dcd50b154fe9994ba3f5ead8ac0f59a0fda2559179cd0396c030b30aaf1df5b2"} Dec 10 12:16:13 crc kubenswrapper[4682]: W1210 12:16:13.893646 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc4877511_494d_453e_8d87_ed2198ca5d4f.slice/crio-4efd23c4291ab58414ecc4a0e8a529b78f7914d1abbad064bd98f3721b02a9e0 WatchSource:0}: Error finding container 4efd23c4291ab58414ecc4a0e8a529b78f7914d1abbad064bd98f3721b02a9e0: Status 404 returned error can't find the container with id 4efd23c4291ab58414ecc4a0e8a529b78f7914d1abbad064bd98f3721b02a9e0 Dec 10 12:16:14 crc kubenswrapper[4682]: I1210 12:16:14.530574 4682 generic.go:334] "Generic (PLEG): container finished" podID="c4877511-494d-453e-8d87-ed2198ca5d4f" containerID="cd99519f41fbb999a9a90169ea9ebaf2ecea58002c118f5477bafbbe4d675cc9" exitCode=0 Dec 10 12:16:14 crc kubenswrapper[4682]: I1210 12:16:14.531312 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rkgpr" event={"ID":"c4877511-494d-453e-8d87-ed2198ca5d4f","Type":"ContainerDied","Data":"cd99519f41fbb999a9a90169ea9ebaf2ecea58002c118f5477bafbbe4d675cc9"} Dec 10 12:16:14 crc kubenswrapper[4682]: I1210 12:16:14.531353 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rkgpr" event={"ID":"c4877511-494d-453e-8d87-ed2198ca5d4f","Type":"ContainerStarted","Data":"4efd23c4291ab58414ecc4a0e8a529b78f7914d1abbad064bd98f3721b02a9e0"} Dec 10 12:16:15 crc kubenswrapper[4682]: E1210 12:16:15.382293 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:16:15 crc kubenswrapper[4682]: I1210 12:16:15.544208 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2qhzg" event={"ID":"0f9b44cf-6e45-4921-a215-1fb8ea5e3778","Type":"ContainerStarted","Data":"8fb74f1bd6a1a00ca3e71f3648bf9498dc4b643ad4d6ae9d97fc87d2bb3544e0"} Dec 10 12:16:15 crc kubenswrapper[4682]: I1210 12:16:15.547565 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-rkgpr" event={"ID":"c4877511-494d-453e-8d87-ed2198ca5d4f","Type":"ContainerStarted","Data":"e1ad2d70e770801095fa2305392b530e8339d6d3e1d3f04b12b518d4e9183d43"} Dec 10 12:16:15 crc kubenswrapper[4682]: I1210 12:16:15.572442 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2qhzg" podStartSLOduration=3.6225398479999997 podStartE2EDuration="6.572422313s" podCreationTimestamp="2025-12-10 12:16:09 +0000 UTC" firstStartedPulling="2025-12-10 12:16:11.417518268 +0000 UTC m=+5451.737729018" lastFinishedPulling="2025-12-10 12:16:14.367400723 +0000 UTC m=+5454.687611483" observedRunningTime="2025-12-10 12:16:15.567577132 +0000 UTC m=+5455.887787882" watchObservedRunningTime="2025-12-10 12:16:15.572422313 +0000 UTC m=+5455.892633063" Dec 10 12:16:16 crc kubenswrapper[4682]: I1210 12:16:16.559341 4682 generic.go:334] "Generic (PLEG): container finished" podID="c4877511-494d-453e-8d87-ed2198ca5d4f" containerID="e1ad2d70e770801095fa2305392b530e8339d6d3e1d3f04b12b518d4e9183d43" exitCode=0 Dec 10 12:16:16 crc kubenswrapper[4682]: I1210 12:16:16.559539 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rkgpr" event={"ID":"c4877511-494d-453e-8d87-ed2198ca5d4f","Type":"ContainerDied","Data":"e1ad2d70e770801095fa2305392b530e8339d6d3e1d3f04b12b518d4e9183d43"} Dec 10 12:16:17 crc kubenswrapper[4682]: I1210 12:16:17.602135 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rkgpr" event={"ID":"c4877511-494d-453e-8d87-ed2198ca5d4f","Type":"ContainerStarted","Data":"73125fb9b747ea449acae39c7381ff9d148bff2690c5bf2fa43aeb1b1e6d0206"} Dec 10 12:16:17 crc kubenswrapper[4682]: I1210 12:16:17.624820 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rkgpr" podStartSLOduration=3.181473454 podStartE2EDuration="5.624799237s" podCreationTimestamp="2025-12-10 12:16:12 +0000 UTC" firstStartedPulling="2025-12-10 12:16:14.532547446 +0000 UTC m=+5454.852758186" lastFinishedPulling="2025-12-10 12:16:16.975873219 +0000 UTC m=+5457.296083969" observedRunningTime="2025-12-10 12:16:17.618601804 +0000 UTC m=+5457.938812554" watchObservedRunningTime="2025-12-10 12:16:17.624799237 +0000 UTC m=+5457.945009987" Dec 10 12:16:20 crc kubenswrapper[4682]: I1210 12:16:20.289401 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:20 crc kubenswrapper[4682]: I1210 12:16:20.289760 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:20 crc kubenswrapper[4682]: I1210 12:16:20.351650 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:20 crc kubenswrapper[4682]: I1210 12:16:20.680477 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:22 crc kubenswrapper[4682]: I1210 12:16:22.334233 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2qhzg"] Dec 10 12:16:22 crc kubenswrapper[4682]: I1210 12:16:22.652797 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2qhzg" podUID="0f9b44cf-6e45-4921-a215-1fb8ea5e3778" 
containerName="registry-server" containerID="cri-o://8fb74f1bd6a1a00ca3e71f3648bf9498dc4b643ad4d6ae9d97fc87d2bb3544e0" gracePeriod=2 Dec 10 12:16:22 crc kubenswrapper[4682]: I1210 12:16:22.873211 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:22 crc kubenswrapper[4682]: I1210 12:16:22.873255 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:22 crc kubenswrapper[4682]: I1210 12:16:22.931351 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:23 crc kubenswrapper[4682]: E1210 12:16:23.383252 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:16:23 crc kubenswrapper[4682]: I1210 12:16:23.671957 4682 generic.go:334] "Generic (PLEG): container finished" podID="0f9b44cf-6e45-4921-a215-1fb8ea5e3778" containerID="8fb74f1bd6a1a00ca3e71f3648bf9498dc4b643ad4d6ae9d97fc87d2bb3544e0" exitCode=0 Dec 10 12:16:23 crc kubenswrapper[4682]: I1210 12:16:23.672009 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2qhzg" event={"ID":"0f9b44cf-6e45-4921-a215-1fb8ea5e3778","Type":"ContainerDied","Data":"8fb74f1bd6a1a00ca3e71f3648bf9498dc4b643ad4d6ae9d97fc87d2bb3544e0"} Dec 10 12:16:23 crc kubenswrapper[4682]: I1210 12:16:23.722611 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:24 crc kubenswrapper[4682]: I1210 12:16:24.684948 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2qhzg" event={"ID":"0f9b44cf-6e45-4921-a215-1fb8ea5e3778","Type":"ContainerDied","Data":"2c53cbdd61ffd22b334d6034da6b19666bf4c4953aa38b8c991d520db28c4a80"} Dec 10 12:16:24 crc kubenswrapper[4682]: I1210 12:16:24.685290 4682 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c53cbdd61ffd22b334d6034da6b19666bf4c4953aa38b8c991d520db28c4a80" Dec 10 12:16:24 crc kubenswrapper[4682]: I1210 12:16:24.870582 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:24 crc kubenswrapper[4682]: I1210 12:16:24.932709 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dsc5c\" (UniqueName: \"kubernetes.io/projected/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-kube-api-access-dsc5c\") pod \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\" (UID: \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\") " Dec 10 12:16:24 crc kubenswrapper[4682]: I1210 12:16:24.933333 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-utilities\") pod \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\" (UID: \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\") " Dec 10 12:16:24 crc kubenswrapper[4682]: I1210 12:16:24.933652 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-catalog-content\") pod \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\" (UID: \"0f9b44cf-6e45-4921-a215-1fb8ea5e3778\") " Dec 10 12:16:24 crc kubenswrapper[4682]: I1210 12:16:24.934204 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-utilities" (OuterVolumeSpecName: "utilities") pod "0f9b44cf-6e45-4921-a215-1fb8ea5e3778" (UID: "0f9b44cf-6e45-4921-a215-1fb8ea5e3778"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:16:24 crc kubenswrapper[4682]: I1210 12:16:24.934551 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:16:24 crc kubenswrapper[4682]: I1210 12:16:24.938905 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-kube-api-access-dsc5c" (OuterVolumeSpecName: "kube-api-access-dsc5c") pod "0f9b44cf-6e45-4921-a215-1fb8ea5e3778" (UID: "0f9b44cf-6e45-4921-a215-1fb8ea5e3778"). InnerVolumeSpecName "kube-api-access-dsc5c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:16:24 crc kubenswrapper[4682]: I1210 12:16:24.989557 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0f9b44cf-6e45-4921-a215-1fb8ea5e3778" (UID: "0f9b44cf-6e45-4921-a215-1fb8ea5e3778"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:16:25 crc kubenswrapper[4682]: I1210 12:16:25.036256 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:16:25 crc kubenswrapper[4682]: I1210 12:16:25.036289 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dsc5c\" (UniqueName: \"kubernetes.io/projected/0f9b44cf-6e45-4921-a215-1fb8ea5e3778-kube-api-access-dsc5c\") on node \"crc\" DevicePath \"\"" Dec 10 12:16:25 crc kubenswrapper[4682]: I1210 12:16:25.695004 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2qhzg" Dec 10 12:16:25 crc kubenswrapper[4682]: I1210 12:16:25.747557 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2qhzg"] Dec 10 12:16:25 crc kubenswrapper[4682]: I1210 12:16:25.780054 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2qhzg"] Dec 10 12:16:26 crc kubenswrapper[4682]: I1210 12:16:26.396580 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f9b44cf-6e45-4921-a215-1fb8ea5e3778" path="/var/lib/kubelet/pods/0f9b44cf-6e45-4921-a215-1fb8ea5e3778/volumes" Dec 10 12:16:27 crc kubenswrapper[4682]: I1210 12:16:27.363731 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-jkr7g_c67b5f8a-c145-46aa-8074-32612df1d2a2/prometheus-operator/0.log" Dec 10 12:16:27 crc kubenswrapper[4682]: I1210 12:16:27.604405 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7c4479dc7c-s6f9n_7e3209c7-0b3d-4bf4-9393-2fd01d97e1aa/prometheus-operator-admission-webhook/0.log" Dec 10 12:16:27 crc kubenswrapper[4682]: I1210 12:16:27.688500 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7c4479dc7c-xr84t_26e76d66-6fe7-4796-b0e6-d767d3f12d22/prometheus-operator-admission-webhook/0.log" Dec 10 12:16:27 crc kubenswrapper[4682]: I1210 12:16:27.732633 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rkgpr"] Dec 10 12:16:27 crc kubenswrapper[4682]: I1210 12:16:27.732876 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rkgpr" podUID="c4877511-494d-453e-8d87-ed2198ca5d4f" containerName="registry-server" containerID="cri-o://73125fb9b747ea449acae39c7381ff9d148bff2690c5bf2fa43aeb1b1e6d0206" gracePeriod=2 Dec 10 12:16:27 crc kubenswrapper[4682]: I1210 12:16:27.860098 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-pgsh6_98eb9d3b-204c-4e4e-ac7e-484ac354bbca/operator/0.log" Dec 10 12:16:27 crc kubenswrapper[4682]: I1210 12:16:27.960895 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-d5rcq_690f858b-11ca-4449-89ed-5f3fb287113e/perses-operator/0.log" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.335242 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.441885 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4877511-494d-453e-8d87-ed2198ca5d4f-utilities\") pod \"c4877511-494d-453e-8d87-ed2198ca5d4f\" (UID: \"c4877511-494d-453e-8d87-ed2198ca5d4f\") " Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.442018 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4877511-494d-453e-8d87-ed2198ca5d4f-catalog-content\") pod \"c4877511-494d-453e-8d87-ed2198ca5d4f\" (UID: \"c4877511-494d-453e-8d87-ed2198ca5d4f\") " Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.442921 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hv6sn\" (UniqueName: \"kubernetes.io/projected/c4877511-494d-453e-8d87-ed2198ca5d4f-kube-api-access-hv6sn\") pod \"c4877511-494d-453e-8d87-ed2198ca5d4f\" (UID: \"c4877511-494d-453e-8d87-ed2198ca5d4f\") " Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.446036 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4877511-494d-453e-8d87-ed2198ca5d4f-utilities" (OuterVolumeSpecName: "utilities") pod "c4877511-494d-453e-8d87-ed2198ca5d4f" (UID: "c4877511-494d-453e-8d87-ed2198ca5d4f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.446780 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4877511-494d-453e-8d87-ed2198ca5d4f-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.450485 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4877511-494d-453e-8d87-ed2198ca5d4f-kube-api-access-hv6sn" (OuterVolumeSpecName: "kube-api-access-hv6sn") pod "c4877511-494d-453e-8d87-ed2198ca5d4f" (UID: "c4877511-494d-453e-8d87-ed2198ca5d4f"). InnerVolumeSpecName "kube-api-access-hv6sn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.476973 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4877511-494d-453e-8d87-ed2198ca5d4f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c4877511-494d-453e-8d87-ed2198ca5d4f" (UID: "c4877511-494d-453e-8d87-ed2198ca5d4f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.548647 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hv6sn\" (UniqueName: \"kubernetes.io/projected/c4877511-494d-453e-8d87-ed2198ca5d4f-kube-api-access-hv6sn\") on node \"crc\" DevicePath \"\"" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.548690 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4877511-494d-453e-8d87-ed2198ca5d4f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.726529 4682 generic.go:334] "Generic (PLEG): container finished" podID="c4877511-494d-453e-8d87-ed2198ca5d4f" containerID="73125fb9b747ea449acae39c7381ff9d148bff2690c5bf2fa43aeb1b1e6d0206" exitCode=0 Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.726726 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rkgpr" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.726730 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rkgpr" event={"ID":"c4877511-494d-453e-8d87-ed2198ca5d4f","Type":"ContainerDied","Data":"73125fb9b747ea449acae39c7381ff9d148bff2690c5bf2fa43aeb1b1e6d0206"} Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.727634 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rkgpr" event={"ID":"c4877511-494d-453e-8d87-ed2198ca5d4f","Type":"ContainerDied","Data":"4efd23c4291ab58414ecc4a0e8a529b78f7914d1abbad064bd98f3721b02a9e0"} Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.727663 4682 scope.go:117] "RemoveContainer" containerID="73125fb9b747ea449acae39c7381ff9d148bff2690c5bf2fa43aeb1b1e6d0206" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.758716 4682 scope.go:117] "RemoveContainer" containerID="e1ad2d70e770801095fa2305392b530e8339d6d3e1d3f04b12b518d4e9183d43" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.769670 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rkgpr"] Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.781412 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rkgpr"] Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.832222 4682 scope.go:117] "RemoveContainer" containerID="cd99519f41fbb999a9a90169ea9ebaf2ecea58002c118f5477bafbbe4d675cc9" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.862659 4682 scope.go:117] "RemoveContainer" containerID="73125fb9b747ea449acae39c7381ff9d148bff2690c5bf2fa43aeb1b1e6d0206" Dec 10 12:16:28 crc kubenswrapper[4682]: E1210 12:16:28.863381 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73125fb9b747ea449acae39c7381ff9d148bff2690c5bf2fa43aeb1b1e6d0206\": container with ID starting with 73125fb9b747ea449acae39c7381ff9d148bff2690c5bf2fa43aeb1b1e6d0206 not found: ID does not exist" containerID="73125fb9b747ea449acae39c7381ff9d148bff2690c5bf2fa43aeb1b1e6d0206" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.863443 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73125fb9b747ea449acae39c7381ff9d148bff2690c5bf2fa43aeb1b1e6d0206"} err="failed to get container status 
\"73125fb9b747ea449acae39c7381ff9d148bff2690c5bf2fa43aeb1b1e6d0206\": rpc error: code = NotFound desc = could not find container \"73125fb9b747ea449acae39c7381ff9d148bff2690c5bf2fa43aeb1b1e6d0206\": container with ID starting with 73125fb9b747ea449acae39c7381ff9d148bff2690c5bf2fa43aeb1b1e6d0206 not found: ID does not exist" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.863470 4682 scope.go:117] "RemoveContainer" containerID="e1ad2d70e770801095fa2305392b530e8339d6d3e1d3f04b12b518d4e9183d43" Dec 10 12:16:28 crc kubenswrapper[4682]: E1210 12:16:28.864820 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1ad2d70e770801095fa2305392b530e8339d6d3e1d3f04b12b518d4e9183d43\": container with ID starting with e1ad2d70e770801095fa2305392b530e8339d6d3e1d3f04b12b518d4e9183d43 not found: ID does not exist" containerID="e1ad2d70e770801095fa2305392b530e8339d6d3e1d3f04b12b518d4e9183d43" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.864848 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1ad2d70e770801095fa2305392b530e8339d6d3e1d3f04b12b518d4e9183d43"} err="failed to get container status \"e1ad2d70e770801095fa2305392b530e8339d6d3e1d3f04b12b518d4e9183d43\": rpc error: code = NotFound desc = could not find container \"e1ad2d70e770801095fa2305392b530e8339d6d3e1d3f04b12b518d4e9183d43\": container with ID starting with e1ad2d70e770801095fa2305392b530e8339d6d3e1d3f04b12b518d4e9183d43 not found: ID does not exist" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.864868 4682 scope.go:117] "RemoveContainer" containerID="cd99519f41fbb999a9a90169ea9ebaf2ecea58002c118f5477bafbbe4d675cc9" Dec 10 12:16:28 crc kubenswrapper[4682]: E1210 12:16:28.865782 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd99519f41fbb999a9a90169ea9ebaf2ecea58002c118f5477bafbbe4d675cc9\": container with ID starting with cd99519f41fbb999a9a90169ea9ebaf2ecea58002c118f5477bafbbe4d675cc9 not found: ID does not exist" containerID="cd99519f41fbb999a9a90169ea9ebaf2ecea58002c118f5477bafbbe4d675cc9" Dec 10 12:16:28 crc kubenswrapper[4682]: I1210 12:16:28.865809 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd99519f41fbb999a9a90169ea9ebaf2ecea58002c118f5477bafbbe4d675cc9"} err="failed to get container status \"cd99519f41fbb999a9a90169ea9ebaf2ecea58002c118f5477bafbbe4d675cc9\": rpc error: code = NotFound desc = could not find container \"cd99519f41fbb999a9a90169ea9ebaf2ecea58002c118f5477bafbbe4d675cc9\": container with ID starting with cd99519f41fbb999a9a90169ea9ebaf2ecea58002c118f5477bafbbe4d675cc9 not found: ID does not exist" Dec 10 12:16:30 crc kubenswrapper[4682]: E1210 12:16:30.389868 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:16:30 crc kubenswrapper[4682]: I1210 12:16:30.393954 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4877511-494d-453e-8d87-ed2198ca5d4f" path="/var/lib/kubelet/pods/c4877511-494d-453e-8d87-ed2198ca5d4f/volumes" Dec 10 12:16:36 crc kubenswrapper[4682]: E1210 12:16:36.383950 4682 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:16:40 crc kubenswrapper[4682]: I1210 12:16:40.936007 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-7fcbf8fdb4-m96c8_351331cd-a02a-4356-9143-325ba6a4c72a/kube-rbac-proxy/0.log" Dec 10 12:16:40 crc kubenswrapper[4682]: I1210 12:16:40.989861 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-7fcbf8fdb4-m96c8_351331cd-a02a-4356-9143-325ba6a4c72a/manager/0.log" Dec 10 12:16:41 crc kubenswrapper[4682]: E1210 12:16:41.383648 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:16:49 crc kubenswrapper[4682]: E1210 12:16:49.390705 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:16:56 crc kubenswrapper[4682]: E1210 12:16:56.383121 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:17:02 crc kubenswrapper[4682]: E1210 12:17:02.382917 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:17:08 crc kubenswrapper[4682]: E1210 12:17:08.382533 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:17:13 crc kubenswrapper[4682]: E1210 12:17:13.385378 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:17:20 crc kubenswrapper[4682]: E1210 12:17:20.403940 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:17:24 crc kubenswrapper[4682]: E1210 12:17:24.384718 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:17:35 crc kubenswrapper[4682]: E1210 12:17:35.383189 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:17:36 crc kubenswrapper[4682]: I1210 12:17:36.478222 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:17:36 crc kubenswrapper[4682]: I1210 12:17:36.478726 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:17:37 crc kubenswrapper[4682]: E1210 12:17:37.383968 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:17:47 crc kubenswrapper[4682]: E1210 12:17:47.391243 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:17:48 crc kubenswrapper[4682]: E1210 12:17:48.388336 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.542146 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qhqwf"] Dec 10 12:17:51 crc kubenswrapper[4682]: E1210 12:17:51.543556 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f9b44cf-6e45-4921-a215-1fb8ea5e3778" containerName="extract-content" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.543571 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f9b44cf-6e45-4921-a215-1fb8ea5e3778" containerName="extract-content" Dec 10 12:17:51 
crc kubenswrapper[4682]: E1210 12:17:51.543593 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4877511-494d-453e-8d87-ed2198ca5d4f" containerName="extract-utilities" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.543599 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4877511-494d-453e-8d87-ed2198ca5d4f" containerName="extract-utilities" Dec 10 12:17:51 crc kubenswrapper[4682]: E1210 12:17:51.543609 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4877511-494d-453e-8d87-ed2198ca5d4f" containerName="extract-content" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.543615 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4877511-494d-453e-8d87-ed2198ca5d4f" containerName="extract-content" Dec 10 12:17:51 crc kubenswrapper[4682]: E1210 12:17:51.543624 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f9b44cf-6e45-4921-a215-1fb8ea5e3778" containerName="registry-server" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.543629 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f9b44cf-6e45-4921-a215-1fb8ea5e3778" containerName="registry-server" Dec 10 12:17:51 crc kubenswrapper[4682]: E1210 12:17:51.543650 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f9b44cf-6e45-4921-a215-1fb8ea5e3778" containerName="extract-utilities" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.543656 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f9b44cf-6e45-4921-a215-1fb8ea5e3778" containerName="extract-utilities" Dec 10 12:17:51 crc kubenswrapper[4682]: E1210 12:17:51.543665 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4877511-494d-453e-8d87-ed2198ca5d4f" containerName="registry-server" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.543671 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4877511-494d-453e-8d87-ed2198ca5d4f" containerName="registry-server" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.543887 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4877511-494d-453e-8d87-ed2198ca5d4f" containerName="registry-server" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.543909 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f9b44cf-6e45-4921-a215-1fb8ea5e3778" containerName="registry-server" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.545798 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.583197 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qhqwf"] Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.710199 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-utilities\") pod \"redhat-operators-qhqwf\" (UID: \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\") " pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.710742 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-catalog-content\") pod \"redhat-operators-qhqwf\" (UID: \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\") " pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.710956 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22vvs\" (UniqueName: \"kubernetes.io/projected/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-kube-api-access-22vvs\") pod \"redhat-operators-qhqwf\" (UID: \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\") " pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.812900 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-catalog-content\") pod \"redhat-operators-qhqwf\" (UID: \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\") " pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.812998 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22vvs\" (UniqueName: \"kubernetes.io/projected/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-kube-api-access-22vvs\") pod \"redhat-operators-qhqwf\" (UID: \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\") " pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.813086 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-utilities\") pod \"redhat-operators-qhqwf\" (UID: \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\") " pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.813397 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-catalog-content\") pod \"redhat-operators-qhqwf\" (UID: \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\") " pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.813668 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-utilities\") pod \"redhat-operators-qhqwf\" (UID: \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\") " pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.833274 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-22vvs\" (UniqueName: \"kubernetes.io/projected/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-kube-api-access-22vvs\") pod \"redhat-operators-qhqwf\" (UID: \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\") " pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:17:51 crc kubenswrapper[4682]: I1210 12:17:51.881404 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:17:52 crc kubenswrapper[4682]: I1210 12:17:52.467044 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qhqwf"] Dec 10 12:17:52 crc kubenswrapper[4682]: I1210 12:17:52.596392 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qhqwf" event={"ID":"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef","Type":"ContainerStarted","Data":"9743f2fc02f12cb4e02a7db4169ff31638c0ba082c40890ed96e944ccee854cf"} Dec 10 12:17:53 crc kubenswrapper[4682]: I1210 12:17:53.606598 4682 generic.go:334] "Generic (PLEG): container finished" podID="eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" containerID="bb48bfca81018857e81d96c31c39a84d6da385480605708f82fff6204cd91daf" exitCode=0 Dec 10 12:17:53 crc kubenswrapper[4682]: I1210 12:17:53.606650 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qhqwf" event={"ID":"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef","Type":"ContainerDied","Data":"bb48bfca81018857e81d96c31c39a84d6da385480605708f82fff6204cd91daf"} Dec 10 12:17:54 crc kubenswrapper[4682]: I1210 12:17:54.622561 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qhqwf" event={"ID":"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef","Type":"ContainerStarted","Data":"d6930e0faaef43abd086604cd0e53327e1c0dab0ec3151f1746fd386a9878af2"} Dec 10 12:17:58 crc kubenswrapper[4682]: E1210 12:17:58.384361 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:17:58 crc kubenswrapper[4682]: I1210 12:17:58.672665 4682 generic.go:334] "Generic (PLEG): container finished" podID="eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" containerID="d6930e0faaef43abd086604cd0e53327e1c0dab0ec3151f1746fd386a9878af2" exitCode=0 Dec 10 12:17:58 crc kubenswrapper[4682]: I1210 12:17:58.672707 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qhqwf" event={"ID":"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef","Type":"ContainerDied","Data":"d6930e0faaef43abd086604cd0e53327e1c0dab0ec3151f1746fd386a9878af2"} Dec 10 12:17:59 crc kubenswrapper[4682]: I1210 12:17:59.698124 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qhqwf" event={"ID":"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef","Type":"ContainerStarted","Data":"1c3c0bb9dcf43d092f3a88b5a671b325837bbbd598c748801c28dab71528eba4"} Dec 10 12:17:59 crc kubenswrapper[4682]: I1210 12:17:59.730204 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qhqwf" podStartSLOduration=3.200815518 podStartE2EDuration="8.730181999s" podCreationTimestamp="2025-12-10 12:17:51 +0000 UTC" firstStartedPulling="2025-12-10 12:17:53.608840251 +0000 UTC m=+5553.929051001" 
lastFinishedPulling="2025-12-10 12:17:59.138206732 +0000 UTC m=+5559.458417482" observedRunningTime="2025-12-10 12:17:59.720863679 +0000 UTC m=+5560.041074449" watchObservedRunningTime="2025-12-10 12:17:59.730181999 +0000 UTC m=+5560.050392749" Dec 10 12:18:01 crc kubenswrapper[4682]: E1210 12:18:01.382992 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:18:01 crc kubenswrapper[4682]: I1210 12:18:01.881750 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:18:01 crc kubenswrapper[4682]: I1210 12:18:01.882095 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:18:02 crc kubenswrapper[4682]: I1210 12:18:02.932174 4682 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qhqwf" podUID="eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" containerName="registry-server" probeResult="failure" output=< Dec 10 12:18:02 crc kubenswrapper[4682]: timeout: failed to connect service ":50051" within 1s Dec 10 12:18:02 crc kubenswrapper[4682]: > Dec 10 12:18:06 crc kubenswrapper[4682]: I1210 12:18:06.479153 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:18:06 crc kubenswrapper[4682]: I1210 12:18:06.479523 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:18:09 crc kubenswrapper[4682]: E1210 12:18:09.383552 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:18:11 crc kubenswrapper[4682]: I1210 12:18:11.937166 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:18:11 crc kubenswrapper[4682]: I1210 12:18:11.994719 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:18:12 crc kubenswrapper[4682]: I1210 12:18:12.180334 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qhqwf"] Dec 10 12:18:13 crc kubenswrapper[4682]: I1210 12:18:13.852678 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qhqwf" podUID="eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" containerName="registry-server" containerID="cri-o://1c3c0bb9dcf43d092f3a88b5a671b325837bbbd598c748801c28dab71528eba4" gracePeriod=2 Dec 10 12:18:14 crc 
kubenswrapper[4682]: E1210 12:18:14.383064 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.458043 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.581283 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22vvs\" (UniqueName: \"kubernetes.io/projected/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-kube-api-access-22vvs\") pod \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\" (UID: \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\") " Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.581398 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-catalog-content\") pod \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\" (UID: \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\") " Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.581500 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-utilities\") pod \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\" (UID: \"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef\") " Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.582399 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-utilities" (OuterVolumeSpecName: "utilities") pod "eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" (UID: "eae5cf57-ff21-49ab-ad15-9141d0ebf1ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.589493 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-kube-api-access-22vvs" (OuterVolumeSpecName: "kube-api-access-22vvs") pod "eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" (UID: "eae5cf57-ff21-49ab-ad15-9141d0ebf1ef"). InnerVolumeSpecName "kube-api-access-22vvs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.684948 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.685163 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22vvs\" (UniqueName: \"kubernetes.io/projected/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-kube-api-access-22vvs\") on node \"crc\" DevicePath \"\"" Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.705246 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" (UID: "eae5cf57-ff21-49ab-ad15-9141d0ebf1ef"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.788606 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.870781 4682 generic.go:334] "Generic (PLEG): container finished" podID="eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" containerID="1c3c0bb9dcf43d092f3a88b5a671b325837bbbd598c748801c28dab71528eba4" exitCode=0 Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.870836 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qhqwf" event={"ID":"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef","Type":"ContainerDied","Data":"1c3c0bb9dcf43d092f3a88b5a671b325837bbbd598c748801c28dab71528eba4"} Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.870867 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qhqwf" event={"ID":"eae5cf57-ff21-49ab-ad15-9141d0ebf1ef","Type":"ContainerDied","Data":"9743f2fc02f12cb4e02a7db4169ff31638c0ba082c40890ed96e944ccee854cf"} Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.870888 4682 scope.go:117] "RemoveContainer" containerID="1c3c0bb9dcf43d092f3a88b5a671b325837bbbd598c748801c28dab71528eba4" Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.871060 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qhqwf" Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.915225 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qhqwf"] Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.916888 4682 scope.go:117] "RemoveContainer" containerID="d6930e0faaef43abd086604cd0e53327e1c0dab0ec3151f1746fd386a9878af2" Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.926108 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qhqwf"] Dec 10 12:18:14 crc kubenswrapper[4682]: I1210 12:18:14.944125 4682 scope.go:117] "RemoveContainer" containerID="bb48bfca81018857e81d96c31c39a84d6da385480605708f82fff6204cd91daf" Dec 10 12:18:15 crc kubenswrapper[4682]: I1210 12:18:15.005584 4682 scope.go:117] "RemoveContainer" containerID="1c3c0bb9dcf43d092f3a88b5a671b325837bbbd598c748801c28dab71528eba4" Dec 10 12:18:15 crc kubenswrapper[4682]: E1210 12:18:15.006059 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c3c0bb9dcf43d092f3a88b5a671b325837bbbd598c748801c28dab71528eba4\": container with ID starting with 1c3c0bb9dcf43d092f3a88b5a671b325837bbbd598c748801c28dab71528eba4 not found: ID does not exist" containerID="1c3c0bb9dcf43d092f3a88b5a671b325837bbbd598c748801c28dab71528eba4" Dec 10 12:18:15 crc kubenswrapper[4682]: I1210 12:18:15.006141 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c3c0bb9dcf43d092f3a88b5a671b325837bbbd598c748801c28dab71528eba4"} err="failed to get container status \"1c3c0bb9dcf43d092f3a88b5a671b325837bbbd598c748801c28dab71528eba4\": rpc error: code = NotFound desc = could not find container \"1c3c0bb9dcf43d092f3a88b5a671b325837bbbd598c748801c28dab71528eba4\": container with ID starting with 1c3c0bb9dcf43d092f3a88b5a671b325837bbbd598c748801c28dab71528eba4 not found: ID does not exist" Dec 10 12:18:15 crc 
kubenswrapper[4682]: I1210 12:18:15.006164 4682 scope.go:117] "RemoveContainer" containerID="d6930e0faaef43abd086604cd0e53327e1c0dab0ec3151f1746fd386a9878af2" Dec 10 12:18:15 crc kubenswrapper[4682]: E1210 12:18:15.006460 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6930e0faaef43abd086604cd0e53327e1c0dab0ec3151f1746fd386a9878af2\": container with ID starting with d6930e0faaef43abd086604cd0e53327e1c0dab0ec3151f1746fd386a9878af2 not found: ID does not exist" containerID="d6930e0faaef43abd086604cd0e53327e1c0dab0ec3151f1746fd386a9878af2" Dec 10 12:18:15 crc kubenswrapper[4682]: I1210 12:18:15.006512 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6930e0faaef43abd086604cd0e53327e1c0dab0ec3151f1746fd386a9878af2"} err="failed to get container status \"d6930e0faaef43abd086604cd0e53327e1c0dab0ec3151f1746fd386a9878af2\": rpc error: code = NotFound desc = could not find container \"d6930e0faaef43abd086604cd0e53327e1c0dab0ec3151f1746fd386a9878af2\": container with ID starting with d6930e0faaef43abd086604cd0e53327e1c0dab0ec3151f1746fd386a9878af2 not found: ID does not exist" Dec 10 12:18:15 crc kubenswrapper[4682]: I1210 12:18:15.006532 4682 scope.go:117] "RemoveContainer" containerID="bb48bfca81018857e81d96c31c39a84d6da385480605708f82fff6204cd91daf" Dec 10 12:18:15 crc kubenswrapper[4682]: E1210 12:18:15.006832 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb48bfca81018857e81d96c31c39a84d6da385480605708f82fff6204cd91daf\": container with ID starting with bb48bfca81018857e81d96c31c39a84d6da385480605708f82fff6204cd91daf not found: ID does not exist" containerID="bb48bfca81018857e81d96c31c39a84d6da385480605708f82fff6204cd91daf" Dec 10 12:18:15 crc kubenswrapper[4682]: I1210 12:18:15.006926 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb48bfca81018857e81d96c31c39a84d6da385480605708f82fff6204cd91daf"} err="failed to get container status \"bb48bfca81018857e81d96c31c39a84d6da385480605708f82fff6204cd91daf\": rpc error: code = NotFound desc = could not find container \"bb48bfca81018857e81d96c31c39a84d6da385480605708f82fff6204cd91daf\": container with ID starting with bb48bfca81018857e81d96c31c39a84d6da385480605708f82fff6204cd91daf not found: ID does not exist" Dec 10 12:18:16 crc kubenswrapper[4682]: I1210 12:18:16.398145 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" path="/var/lib/kubelet/pods/eae5cf57-ff21-49ab-ad15-9141d0ebf1ef/volumes" Dec 10 12:18:20 crc kubenswrapper[4682]: I1210 12:18:20.928641 4682 generic.go:334] "Generic (PLEG): container finished" podID="bcfca302-1a28-4fe3-b059-96f1b8b41aff" containerID="a5ea6fd590620139b87675b071b001a4be00b6363cef627d1109f66785049365" exitCode=0 Dec 10 12:18:20 crc kubenswrapper[4682]: I1210 12:18:20.928710 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z8nb5/must-gather-nnl6v" event={"ID":"bcfca302-1a28-4fe3-b059-96f1b8b41aff","Type":"ContainerDied","Data":"a5ea6fd590620139b87675b071b001a4be00b6363cef627d1109f66785049365"} Dec 10 12:18:20 crc kubenswrapper[4682]: I1210 12:18:20.930276 4682 scope.go:117] "RemoveContainer" containerID="a5ea6fd590620139b87675b071b001a4be00b6363cef627d1109f66785049365" Dec 10 12:18:21 crc kubenswrapper[4682]: I1210 12:18:21.062257 4682 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openshift-must-gather-z8nb5_must-gather-nnl6v_bcfca302-1a28-4fe3-b059-96f1b8b41aff/gather/0.log" Dec 10 12:18:21 crc kubenswrapper[4682]: E1210 12:18:21.382629 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:18:26 crc kubenswrapper[4682]: I1210 12:18:26.383323 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 12:18:26 crc kubenswrapper[4682]: E1210 12:18:26.489453 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 12:18:26 crc kubenswrapper[4682]: E1210 12:18:26.489558 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 12:18:26 crc kubenswrapper[4682]: E1210 12:18:26.489787 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:18:26 crc kubenswrapper[4682]: E1210 12:18:26.490972 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:18:28 crc kubenswrapper[4682]: I1210 12:18:28.813383 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-z8nb5/must-gather-nnl6v"] Dec 10 12:18:28 crc kubenswrapper[4682]: I1210 12:18:28.814219 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-z8nb5/must-gather-nnl6v" podUID="bcfca302-1a28-4fe3-b059-96f1b8b41aff" containerName="copy" containerID="cri-o://02b8458220c53c7f25d06903d7e9ba4378c957a630c0b3fa70818eb06bd10559" gracePeriod=2 Dec 10 12:18:28 crc kubenswrapper[4682]: I1210 12:18:28.826052 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-z8nb5/must-gather-nnl6v"] Dec 10 12:18:29 crc kubenswrapper[4682]: I1210 12:18:29.007806 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-z8nb5_must-gather-nnl6v_bcfca302-1a28-4fe3-b059-96f1b8b41aff/copy/0.log" Dec 10 12:18:29 crc kubenswrapper[4682]: I1210 12:18:29.008513 4682 generic.go:334] "Generic (PLEG): container finished" podID="bcfca302-1a28-4fe3-b059-96f1b8b41aff" containerID="02b8458220c53c7f25d06903d7e9ba4378c957a630c0b3fa70818eb06bd10559" exitCode=143 Dec 10 12:18:29 crc kubenswrapper[4682]: I1210 12:18:29.304381 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-z8nb5_must-gather-nnl6v_bcfca302-1a28-4fe3-b059-96f1b8b41aff/copy/0.log" Dec 10 12:18:29 crc kubenswrapper[4682]: I1210 12:18:29.305062 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-z8nb5/must-gather-nnl6v" Dec 10 12:18:29 crc kubenswrapper[4682]: I1210 12:18:29.492314 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzh6x\" (UniqueName: \"kubernetes.io/projected/bcfca302-1a28-4fe3-b059-96f1b8b41aff-kube-api-access-xzh6x\") pod \"bcfca302-1a28-4fe3-b059-96f1b8b41aff\" (UID: \"bcfca302-1a28-4fe3-b059-96f1b8b41aff\") " Dec 10 12:18:29 crc kubenswrapper[4682]: I1210 12:18:29.492508 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/bcfca302-1a28-4fe3-b059-96f1b8b41aff-must-gather-output\") pod \"bcfca302-1a28-4fe3-b059-96f1b8b41aff\" (UID: \"bcfca302-1a28-4fe3-b059-96f1b8b41aff\") " Dec 10 12:18:29 crc kubenswrapper[4682]: I1210 12:18:29.498914 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcfca302-1a28-4fe3-b059-96f1b8b41aff-kube-api-access-xzh6x" (OuterVolumeSpecName: "kube-api-access-xzh6x") pod "bcfca302-1a28-4fe3-b059-96f1b8b41aff" (UID: "bcfca302-1a28-4fe3-b059-96f1b8b41aff"). InnerVolumeSpecName "kube-api-access-xzh6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:18:29 crc kubenswrapper[4682]: I1210 12:18:29.595381 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzh6x\" (UniqueName: \"kubernetes.io/projected/bcfca302-1a28-4fe3-b059-96f1b8b41aff-kube-api-access-xzh6x\") on node \"crc\" DevicePath \"\"" Dec 10 12:18:29 crc kubenswrapper[4682]: I1210 12:18:29.659434 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bcfca302-1a28-4fe3-b059-96f1b8b41aff-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "bcfca302-1a28-4fe3-b059-96f1b8b41aff" (UID: "bcfca302-1a28-4fe3-b059-96f1b8b41aff"). 
InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:18:29 crc kubenswrapper[4682]: I1210 12:18:29.697259 4682 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/bcfca302-1a28-4fe3-b059-96f1b8b41aff-must-gather-output\") on node \"crc\" DevicePath \"\"" Dec 10 12:18:30 crc kubenswrapper[4682]: I1210 12:18:30.021628 4682 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-z8nb5_must-gather-nnl6v_bcfca302-1a28-4fe3-b059-96f1b8b41aff/copy/0.log" Dec 10 12:18:30 crc kubenswrapper[4682]: I1210 12:18:30.022608 4682 scope.go:117] "RemoveContainer" containerID="02b8458220c53c7f25d06903d7e9ba4378c957a630c0b3fa70818eb06bd10559" Dec 10 12:18:30 crc kubenswrapper[4682]: I1210 12:18:30.022744 4682 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-z8nb5/must-gather-nnl6v" Dec 10 12:18:30 crc kubenswrapper[4682]: I1210 12:18:30.051180 4682 scope.go:117] "RemoveContainer" containerID="a5ea6fd590620139b87675b071b001a4be00b6363cef627d1109f66785049365" Dec 10 12:18:30 crc kubenswrapper[4682]: I1210 12:18:30.403361 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcfca302-1a28-4fe3-b059-96f1b8b41aff" path="/var/lib/kubelet/pods/bcfca302-1a28-4fe3-b059-96f1b8b41aff/volumes" Dec 10 12:18:36 crc kubenswrapper[4682]: I1210 12:18:36.478856 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:18:36 crc kubenswrapper[4682]: I1210 12:18:36.479419 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:18:36 crc kubenswrapper[4682]: I1210 12:18:36.479498 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 12:18:36 crc kubenswrapper[4682]: I1210 12:18:36.480409 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"500f35428be80884766a23672608a264bb1d830f5f6200fb1d53feade4c8bb3c"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 12:18:36 crc kubenswrapper[4682]: I1210 12:18:36.480497 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://500f35428be80884766a23672608a264bb1d830f5f6200fb1d53feade4c8bb3c" gracePeriod=600 Dec 10 12:18:36 crc kubenswrapper[4682]: I1210 12:18:36.498247 4682 scope.go:117] "RemoveContainer" containerID="23c05d53a2c8d960449d304ca7360b713dbaa2bdf9e155223ce0001ed1b9cded" Dec 10 12:18:36 crc kubenswrapper[4682]: E1210 12:18:36.499154 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source 
docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:18:36 crc kubenswrapper[4682]: E1210 12:18:36.499218 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:18:36 crc kubenswrapper[4682]: E1210 12:18:36.499373 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source 
docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:18:36 crc kubenswrapper[4682]: E1210 12:18:36.500527 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:18:37 crc kubenswrapper[4682]: I1210 12:18:37.147217 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="500f35428be80884766a23672608a264bb1d830f5f6200fb1d53feade4c8bb3c" exitCode=0 Dec 10 12:18:37 crc kubenswrapper[4682]: I1210 12:18:37.147290 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"500f35428be80884766a23672608a264bb1d830f5f6200fb1d53feade4c8bb3c"} Dec 10 12:18:37 crc kubenswrapper[4682]: I1210 12:18:37.148062 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerStarted","Data":"b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48"} Dec 10 12:18:37 crc kubenswrapper[4682]: I1210 12:18:37.148100 4682 scope.go:117] "RemoveContainer" containerID="42ae13aba4f17f159c942c84dbfe9e02275eaccb0c631dd5044691bdc89ddb20" Dec 10 12:18:41 crc kubenswrapper[4682]: E1210 12:18:41.386614 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.760294 4682 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wrwpk"] Dec 10 12:18:49 crc kubenswrapper[4682]: E1210 12:18:49.761212 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcfca302-1a28-4fe3-b059-96f1b8b41aff" containerName="gather" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.761224 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcfca302-1a28-4fe3-b059-96f1b8b41aff" containerName="gather" Dec 10 12:18:49 crc kubenswrapper[4682]: E1210 12:18:49.761247 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" containerName="extract-content" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.761254 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" containerName="extract-content" Dec 10 12:18:49 crc kubenswrapper[4682]: E1210 12:18:49.761266 4682 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" containerName="extract-utilities" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.761273 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" containerName="extract-utilities" Dec 10 12:18:49 crc kubenswrapper[4682]: E1210 12:18:49.761309 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcfca302-1a28-4fe3-b059-96f1b8b41aff" containerName="copy" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.761314 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcfca302-1a28-4fe3-b059-96f1b8b41aff" containerName="copy" Dec 10 12:18:49 crc kubenswrapper[4682]: E1210 12:18:49.761327 4682 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" containerName="registry-server" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.761332 4682 state_mem.go:107] "Deleted CPUSet assignment" podUID="eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" containerName="registry-server" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.761564 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcfca302-1a28-4fe3-b059-96f1b8b41aff" containerName="copy" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.761581 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcfca302-1a28-4fe3-b059-96f1b8b41aff" containerName="gather" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.761593 4682 memory_manager.go:354] "RemoveStaleState removing state" podUID="eae5cf57-ff21-49ab-ad15-9141d0ebf1ef" containerName="registry-server" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.763299 4682 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.773882 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wrwpk"] Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.818924 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f46b2\" (UniqueName: \"kubernetes.io/projected/b8048dc3-6fb7-4d6f-b690-c515a8130400-kube-api-access-f46b2\") pod \"community-operators-wrwpk\" (UID: \"b8048dc3-6fb7-4d6f-b690-c515a8130400\") " pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.819050 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8048dc3-6fb7-4d6f-b690-c515a8130400-catalog-content\") pod \"community-operators-wrwpk\" (UID: \"b8048dc3-6fb7-4d6f-b690-c515a8130400\") " pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.819079 4682 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8048dc3-6fb7-4d6f-b690-c515a8130400-utilities\") pod \"community-operators-wrwpk\" (UID: \"b8048dc3-6fb7-4d6f-b690-c515a8130400\") " pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.920974 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f46b2\" (UniqueName: \"kubernetes.io/projected/b8048dc3-6fb7-4d6f-b690-c515a8130400-kube-api-access-f46b2\") pod \"community-operators-wrwpk\" 
(UID: \"b8048dc3-6fb7-4d6f-b690-c515a8130400\") " pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.921058 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8048dc3-6fb7-4d6f-b690-c515a8130400-catalog-content\") pod \"community-operators-wrwpk\" (UID: \"b8048dc3-6fb7-4d6f-b690-c515a8130400\") " pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.921083 4682 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8048dc3-6fb7-4d6f-b690-c515a8130400-utilities\") pod \"community-operators-wrwpk\" (UID: \"b8048dc3-6fb7-4d6f-b690-c515a8130400\") " pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.921554 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8048dc3-6fb7-4d6f-b690-c515a8130400-catalog-content\") pod \"community-operators-wrwpk\" (UID: \"b8048dc3-6fb7-4d6f-b690-c515a8130400\") " pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.921680 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8048dc3-6fb7-4d6f-b690-c515a8130400-utilities\") pod \"community-operators-wrwpk\" (UID: \"b8048dc3-6fb7-4d6f-b690-c515a8130400\") " pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:18:49 crc kubenswrapper[4682]: I1210 12:18:49.948615 4682 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f46b2\" (UniqueName: \"kubernetes.io/projected/b8048dc3-6fb7-4d6f-b690-c515a8130400-kube-api-access-f46b2\") pod \"community-operators-wrwpk\" (UID: \"b8048dc3-6fb7-4d6f-b690-c515a8130400\") " pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:18:50 crc kubenswrapper[4682]: I1210 12:18:50.095762 4682 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:18:50 crc kubenswrapper[4682]: I1210 12:18:50.694138 4682 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wrwpk"] Dec 10 12:18:50 crc kubenswrapper[4682]: W1210 12:18:50.704255 4682 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8048dc3_6fb7_4d6f_b690_c515a8130400.slice/crio-90befc6c9f23fbf5c2ad6f704044bc8036d3367c8f9e525ae8b9b539fa1b8d61 WatchSource:0}: Error finding container 90befc6c9f23fbf5c2ad6f704044bc8036d3367c8f9e525ae8b9b539fa1b8d61: Status 404 returned error can't find the container with id 90befc6c9f23fbf5c2ad6f704044bc8036d3367c8f9e525ae8b9b539fa1b8d61 Dec 10 12:18:51 crc kubenswrapper[4682]: I1210 12:18:51.289131 4682 generic.go:334] "Generic (PLEG): container finished" podID="b8048dc3-6fb7-4d6f-b690-c515a8130400" containerID="7cc0e783216beb5d6c3d9f093a44020f5a5f4b0804415827c3398d9b2a9a0fcc" exitCode=0 Dec 10 12:18:51 crc kubenswrapper[4682]: I1210 12:18:51.289202 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wrwpk" event={"ID":"b8048dc3-6fb7-4d6f-b690-c515a8130400","Type":"ContainerDied","Data":"7cc0e783216beb5d6c3d9f093a44020f5a5f4b0804415827c3398d9b2a9a0fcc"} Dec 10 12:18:51 crc kubenswrapper[4682]: I1210 12:18:51.290641 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wrwpk" event={"ID":"b8048dc3-6fb7-4d6f-b690-c515a8130400","Type":"ContainerStarted","Data":"90befc6c9f23fbf5c2ad6f704044bc8036d3367c8f9e525ae8b9b539fa1b8d61"} Dec 10 12:18:51 crc kubenswrapper[4682]: E1210 12:18:51.383308 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:18:52 crc kubenswrapper[4682]: I1210 12:18:52.301250 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wrwpk" event={"ID":"b8048dc3-6fb7-4d6f-b690-c515a8130400","Type":"ContainerStarted","Data":"798f82d98c17219a4d53890e957da69ac2955acb629da9243edf884820c51140"} Dec 10 12:18:52 crc kubenswrapper[4682]: E1210 12:18:52.386209 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:18:53 crc kubenswrapper[4682]: I1210 12:18:53.310844 4682 generic.go:334] "Generic (PLEG): container finished" podID="b8048dc3-6fb7-4d6f-b690-c515a8130400" containerID="798f82d98c17219a4d53890e957da69ac2955acb629da9243edf884820c51140" exitCode=0 Dec 10 12:18:53 crc kubenswrapper[4682]: I1210 12:18:53.310889 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wrwpk" event={"ID":"b8048dc3-6fb7-4d6f-b690-c515a8130400","Type":"ContainerDied","Data":"798f82d98c17219a4d53890e957da69ac2955acb629da9243edf884820c51140"} Dec 10 12:18:55 crc kubenswrapper[4682]: I1210 12:18:55.387239 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-wrwpk" event={"ID":"b8048dc3-6fb7-4d6f-b690-c515a8130400","Type":"ContainerStarted","Data":"0fcbf207a9d6cf8daa624b043af56c4baed8287a014d08fee2f6e2a27f5d7620"} Dec 10 12:18:55 crc kubenswrapper[4682]: I1210 12:18:55.404024 4682 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wrwpk" podStartSLOduration=2.781184249 podStartE2EDuration="6.404004818s" podCreationTimestamp="2025-12-10 12:18:49 +0000 UTC" firstStartedPulling="2025-12-10 12:18:51.292177352 +0000 UTC m=+5611.612388102" lastFinishedPulling="2025-12-10 12:18:54.914997921 +0000 UTC m=+5615.235208671" observedRunningTime="2025-12-10 12:18:55.402842183 +0000 UTC m=+5615.723052963" watchObservedRunningTime="2025-12-10 12:18:55.404004818 +0000 UTC m=+5615.724215568" Dec 10 12:19:00 crc kubenswrapper[4682]: I1210 12:19:00.096942 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:19:00 crc kubenswrapper[4682]: I1210 12:19:00.097253 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:19:00 crc kubenswrapper[4682]: I1210 12:19:00.162332 4682 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:19:00 crc kubenswrapper[4682]: I1210 12:19:00.497366 4682 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:19:00 crc kubenswrapper[4682]: I1210 12:19:00.552028 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wrwpk"] Dec 10 12:19:02 crc kubenswrapper[4682]: I1210 12:19:02.469564 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wrwpk" podUID="b8048dc3-6fb7-4d6f-b690-c515a8130400" containerName="registry-server" containerID="cri-o://0fcbf207a9d6cf8daa624b043af56c4baed8287a014d08fee2f6e2a27f5d7620" gracePeriod=2 Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.035442 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.196827 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8048dc3-6fb7-4d6f-b690-c515a8130400-catalog-content\") pod \"b8048dc3-6fb7-4d6f-b690-c515a8130400\" (UID: \"b8048dc3-6fb7-4d6f-b690-c515a8130400\") " Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.196930 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f46b2\" (UniqueName: \"kubernetes.io/projected/b8048dc3-6fb7-4d6f-b690-c515a8130400-kube-api-access-f46b2\") pod \"b8048dc3-6fb7-4d6f-b690-c515a8130400\" (UID: \"b8048dc3-6fb7-4d6f-b690-c515a8130400\") " Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.197071 4682 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8048dc3-6fb7-4d6f-b690-c515a8130400-utilities\") pod \"b8048dc3-6fb7-4d6f-b690-c515a8130400\" (UID: \"b8048dc3-6fb7-4d6f-b690-c515a8130400\") " Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.197933 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8048dc3-6fb7-4d6f-b690-c515a8130400-utilities" (OuterVolumeSpecName: "utilities") pod "b8048dc3-6fb7-4d6f-b690-c515a8130400" (UID: "b8048dc3-6fb7-4d6f-b690-c515a8130400"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.203813 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8048dc3-6fb7-4d6f-b690-c515a8130400-kube-api-access-f46b2" (OuterVolumeSpecName: "kube-api-access-f46b2") pod "b8048dc3-6fb7-4d6f-b690-c515a8130400" (UID: "b8048dc3-6fb7-4d6f-b690-c515a8130400"). InnerVolumeSpecName "kube-api-access-f46b2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.259364 4682 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8048dc3-6fb7-4d6f-b690-c515a8130400-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b8048dc3-6fb7-4d6f-b690-c515a8130400" (UID: "b8048dc3-6fb7-4d6f-b690-c515a8130400"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.300247 4682 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8048dc3-6fb7-4d6f-b690-c515a8130400-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.300291 4682 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f46b2\" (UniqueName: \"kubernetes.io/projected/b8048dc3-6fb7-4d6f-b690-c515a8130400-kube-api-access-f46b2\") on node \"crc\" DevicePath \"\"" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.300305 4682 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8048dc3-6fb7-4d6f-b690-c515a8130400-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 12:19:03 crc kubenswrapper[4682]: E1210 12:19:03.383695 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:19:03 crc kubenswrapper[4682]: E1210 12:19:03.383706 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.479030 4682 generic.go:334] "Generic (PLEG): container finished" podID="b8048dc3-6fb7-4d6f-b690-c515a8130400" containerID="0fcbf207a9d6cf8daa624b043af56c4baed8287a014d08fee2f6e2a27f5d7620" exitCode=0 Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.479068 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wrwpk" event={"ID":"b8048dc3-6fb7-4d6f-b690-c515a8130400","Type":"ContainerDied","Data":"0fcbf207a9d6cf8daa624b043af56c4baed8287a014d08fee2f6e2a27f5d7620"} Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.479085 4682 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wrwpk" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.479092 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wrwpk" event={"ID":"b8048dc3-6fb7-4d6f-b690-c515a8130400","Type":"ContainerDied","Data":"90befc6c9f23fbf5c2ad6f704044bc8036d3367c8f9e525ae8b9b539fa1b8d61"} Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.479110 4682 scope.go:117] "RemoveContainer" containerID="0fcbf207a9d6cf8daa624b043af56c4baed8287a014d08fee2f6e2a27f5d7620" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.511965 4682 scope.go:117] "RemoveContainer" containerID="798f82d98c17219a4d53890e957da69ac2955acb629da9243edf884820c51140" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.526331 4682 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wrwpk"] Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.534070 4682 scope.go:117] "RemoveContainer" containerID="7cc0e783216beb5d6c3d9f093a44020f5a5f4b0804415827c3398d9b2a9a0fcc" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.537334 4682 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wrwpk"] Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.586420 4682 scope.go:117] "RemoveContainer" containerID="0fcbf207a9d6cf8daa624b043af56c4baed8287a014d08fee2f6e2a27f5d7620" Dec 10 12:19:03 crc kubenswrapper[4682]: E1210 12:19:03.587062 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0fcbf207a9d6cf8daa624b043af56c4baed8287a014d08fee2f6e2a27f5d7620\": container with ID starting with 0fcbf207a9d6cf8daa624b043af56c4baed8287a014d08fee2f6e2a27f5d7620 not found: ID does not exist" containerID="0fcbf207a9d6cf8daa624b043af56c4baed8287a014d08fee2f6e2a27f5d7620" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.587103 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fcbf207a9d6cf8daa624b043af56c4baed8287a014d08fee2f6e2a27f5d7620"} err="failed to get container status \"0fcbf207a9d6cf8daa624b043af56c4baed8287a014d08fee2f6e2a27f5d7620\": rpc error: code = NotFound desc = could not find container \"0fcbf207a9d6cf8daa624b043af56c4baed8287a014d08fee2f6e2a27f5d7620\": container with ID starting with 0fcbf207a9d6cf8daa624b043af56c4baed8287a014d08fee2f6e2a27f5d7620 not found: ID does not exist" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.587130 4682 scope.go:117] "RemoveContainer" containerID="798f82d98c17219a4d53890e957da69ac2955acb629da9243edf884820c51140" Dec 10 12:19:03 crc kubenswrapper[4682]: E1210 12:19:03.588162 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"798f82d98c17219a4d53890e957da69ac2955acb629da9243edf884820c51140\": container with ID starting with 798f82d98c17219a4d53890e957da69ac2955acb629da9243edf884820c51140 not found: ID does not exist" containerID="798f82d98c17219a4d53890e957da69ac2955acb629da9243edf884820c51140" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.588213 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"798f82d98c17219a4d53890e957da69ac2955acb629da9243edf884820c51140"} err="failed to get container status \"798f82d98c17219a4d53890e957da69ac2955acb629da9243edf884820c51140\": rpc error: code = NotFound desc = could not find 
container \"798f82d98c17219a4d53890e957da69ac2955acb629da9243edf884820c51140\": container with ID starting with 798f82d98c17219a4d53890e957da69ac2955acb629da9243edf884820c51140 not found: ID does not exist" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.588248 4682 scope.go:117] "RemoveContainer" containerID="7cc0e783216beb5d6c3d9f093a44020f5a5f4b0804415827c3398d9b2a9a0fcc" Dec 10 12:19:03 crc kubenswrapper[4682]: E1210 12:19:03.588913 4682 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7cc0e783216beb5d6c3d9f093a44020f5a5f4b0804415827c3398d9b2a9a0fcc\": container with ID starting with 7cc0e783216beb5d6c3d9f093a44020f5a5f4b0804415827c3398d9b2a9a0fcc not found: ID does not exist" containerID="7cc0e783216beb5d6c3d9f093a44020f5a5f4b0804415827c3398d9b2a9a0fcc" Dec 10 12:19:03 crc kubenswrapper[4682]: I1210 12:19:03.588948 4682 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7cc0e783216beb5d6c3d9f093a44020f5a5f4b0804415827c3398d9b2a9a0fcc"} err="failed to get container status \"7cc0e783216beb5d6c3d9f093a44020f5a5f4b0804415827c3398d9b2a9a0fcc\": rpc error: code = NotFound desc = could not find container \"7cc0e783216beb5d6c3d9f093a44020f5a5f4b0804415827c3398d9b2a9a0fcc\": container with ID starting with 7cc0e783216beb5d6c3d9f093a44020f5a5f4b0804415827c3398d9b2a9a0fcc not found: ID does not exist" Dec 10 12:19:03 crc kubenswrapper[4682]: E1210 12:19:03.674434 4682 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8048dc3_6fb7_4d6f_b690_c515a8130400.slice\": RecentStats: unable to find data in memory cache]" Dec 10 12:19:04 crc kubenswrapper[4682]: I1210 12:19:04.395155 4682 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8048dc3-6fb7-4d6f-b690-c515a8130400" path="/var/lib/kubelet/pods/b8048dc3-6fb7-4d6f-b690-c515a8130400/volumes" Dec 10 12:19:16 crc kubenswrapper[4682]: E1210 12:19:16.383693 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:19:17 crc kubenswrapper[4682]: E1210 12:19:17.382646 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:19:28 crc kubenswrapper[4682]: E1210 12:19:28.382655 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:19:31 crc kubenswrapper[4682]: E1210 12:19:31.383100 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:19:41 crc kubenswrapper[4682]: E1210 12:19:41.383352 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:19:43 crc kubenswrapper[4682]: E1210 12:19:43.382207 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:19:52 crc kubenswrapper[4682]: E1210 12:19:52.382783 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:19:58 crc kubenswrapper[4682]: E1210 12:19:58.384054 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:20:05 crc kubenswrapper[4682]: E1210 12:20:05.384090 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:20:13 crc kubenswrapper[4682]: E1210 12:20:13.384955 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:20:20 crc kubenswrapper[4682]: E1210 12:20:20.395874 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:20:26 crc kubenswrapper[4682]: E1210 12:20:26.383666 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:20:34 crc kubenswrapper[4682]: E1210 12:20:34.384005 4682 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:20:36 crc kubenswrapper[4682]: I1210 12:20:36.478361 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:20:36 crc kubenswrapper[4682]: I1210 12:20:36.479053 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:20:41 crc kubenswrapper[4682]: E1210 12:20:41.382638 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:20:49 crc kubenswrapper[4682]: E1210 12:20:49.383065 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:20:52 crc kubenswrapper[4682]: E1210 12:20:52.397323 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:21:01 crc kubenswrapper[4682]: E1210 12:21:01.383737 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:21:06 crc kubenswrapper[4682]: E1210 12:21:06.384096 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:21:06 crc kubenswrapper[4682]: I1210 12:21:06.478227 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:21:06 crc kubenswrapper[4682]: I1210 12:21:06.478290 4682 prober.go:107] 
"Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:21:12 crc kubenswrapper[4682]: E1210 12:21:12.383348 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:21:19 crc kubenswrapper[4682]: E1210 12:21:19.383155 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:21:23 crc kubenswrapper[4682]: E1210 12:21:23.434467 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:21:30 crc kubenswrapper[4682]: E1210 12:21:30.390444 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:21:36 crc kubenswrapper[4682]: E1210 12:21:36.384044 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:21:36 crc kubenswrapper[4682]: I1210 12:21:36.478767 4682 patch_prober.go:28] interesting pod/machine-config-daemon-58skk container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 12:21:36 crc kubenswrapper[4682]: I1210 12:21:36.479295 4682 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 12:21:36 crc kubenswrapper[4682]: I1210 12:21:36.479356 4682 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-58skk" Dec 10 12:21:36 crc kubenswrapper[4682]: I1210 12:21:36.480526 4682 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48"} pod="openshift-machine-config-operator/machine-config-daemon-58skk" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 12:21:36 crc kubenswrapper[4682]: I1210 12:21:36.480632 4682 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" containerName="machine-config-daemon" containerID="cri-o://b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48" gracePeriod=600 Dec 10 12:21:36 crc kubenswrapper[4682]: E1210 12:21:36.608159 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:21:37 crc kubenswrapper[4682]: I1210 12:21:37.022129 4682 generic.go:334] "Generic (PLEG): container finished" podID="b504d5b4-49dc-499d-b17c-957131ba411e" containerID="b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48" exitCode=0 Dec 10 12:21:37 crc kubenswrapper[4682]: I1210 12:21:37.022177 4682 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-58skk" event={"ID":"b504d5b4-49dc-499d-b17c-957131ba411e","Type":"ContainerDied","Data":"b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48"} Dec 10 12:21:37 crc kubenswrapper[4682]: I1210 12:21:37.022221 4682 scope.go:117] "RemoveContainer" containerID="500f35428be80884766a23672608a264bb1d830f5f6200fb1d53feade4c8bb3c" Dec 10 12:21:37 crc kubenswrapper[4682]: I1210 12:21:37.022949 4682 scope.go:117] "RemoveContainer" containerID="b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48" Dec 10 12:21:37 crc kubenswrapper[4682]: E1210 12:21:37.023269 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:21:42 crc kubenswrapper[4682]: E1210 12:21:42.388036 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:21:49 crc kubenswrapper[4682]: I1210 12:21:49.381398 4682 scope.go:117] "RemoveContainer" containerID="b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48" Dec 10 12:21:49 crc kubenswrapper[4682]: E1210 12:21:49.382659 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:21:51 crc kubenswrapper[4682]: E1210 12:21:51.383317 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:21:55 crc kubenswrapper[4682]: E1210 12:21:55.399518 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:22:02 crc kubenswrapper[4682]: I1210 12:22:02.380985 4682 scope.go:117] "RemoveContainer" containerID="b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48" Dec 10 12:22:02 crc kubenswrapper[4682]: E1210 12:22:02.381895 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:22:04 crc kubenswrapper[4682]: E1210 12:22:04.384992 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:22:07 crc kubenswrapper[4682]: E1210 12:22:07.383030 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:22:14 crc kubenswrapper[4682]: I1210 12:22:14.381816 4682 scope.go:117] "RemoveContainer" containerID="b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48" Dec 10 12:22:14 crc kubenswrapper[4682]: E1210 12:22:14.382629 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:22:19 crc kubenswrapper[4682]: E1210 12:22:19.383264 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:22:20 crc kubenswrapper[4682]: E1210 12:22:20.389571 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:22:27 crc kubenswrapper[4682]: I1210 12:22:27.380922 4682 scope.go:117] "RemoveContainer" containerID="b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48" Dec 10 12:22:27 crc kubenswrapper[4682]: E1210 12:22:27.381757 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:22:31 crc kubenswrapper[4682]: E1210 12:22:31.384179 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:22:33 crc kubenswrapper[4682]: E1210 12:22:33.382663 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:22:36 crc kubenswrapper[4682]: I1210 12:22:36.676726 4682 scope.go:117] "RemoveContainer" containerID="d3627b497928a830ecabf26adcf2fb6047897c92df7eb70b8226fa1773ebbe50" Dec 10 12:22:36 crc kubenswrapper[4682]: I1210 12:22:36.701653 4682 scope.go:117] "RemoveContainer" containerID="8fb74f1bd6a1a00ca3e71f3648bf9498dc4b643ad4d6ae9d97fc87d2bb3544e0" Dec 10 12:22:36 crc kubenswrapper[4682]: I1210 12:22:36.763317 4682 scope.go:117] "RemoveContainer" containerID="dcd50b154fe9994ba3f5ead8ac0f59a0fda2559179cd0396c030b30aaf1df5b2" Dec 10 12:22:41 crc kubenswrapper[4682]: I1210 12:22:41.381608 4682 scope.go:117] "RemoveContainer" containerID="b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48" Dec 10 12:22:41 crc kubenswrapper[4682]: E1210 12:22:41.382762 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:22:43 crc kubenswrapper[4682]: E1210 12:22:43.384008 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" 
podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:22:45 crc kubenswrapper[4682]: E1210 12:22:45.383823 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:22:52 crc kubenswrapper[4682]: I1210 12:22:52.381827 4682 scope.go:117] "RemoveContainer" containerID="b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48" Dec 10 12:22:52 crc kubenswrapper[4682]: E1210 12:22:52.382632 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:22:54 crc kubenswrapper[4682]: E1210 12:22:54.383592 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:22:57 crc kubenswrapper[4682]: E1210 12:22:57.382671 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:23:03 crc kubenswrapper[4682]: I1210 12:23:03.381383 4682 scope.go:117] "RemoveContainer" containerID="b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48" Dec 10 12:23:03 crc kubenswrapper[4682]: E1210 12:23:03.382231 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:23:08 crc kubenswrapper[4682]: E1210 12:23:08.383580 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:23:12 crc kubenswrapper[4682]: E1210 12:23:12.383375 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:23:18 crc kubenswrapper[4682]: I1210 12:23:18.381882 4682 scope.go:117] "RemoveContainer" 
containerID="b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48" Dec 10 12:23:18 crc kubenswrapper[4682]: E1210 12:23:18.382759 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:23:21 crc kubenswrapper[4682]: E1210 12:23:21.383215 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested\\\"\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:23:23 crc kubenswrapper[4682]: E1210 12:23:23.383102 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:23:29 crc kubenswrapper[4682]: I1210 12:23:29.381582 4682 scope.go:117] "RemoveContainer" containerID="b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48" Dec 10 12:23:29 crc kubenswrapper[4682]: E1210 12:23:29.383631 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e" Dec 10 12:23:36 crc kubenswrapper[4682]: I1210 12:23:36.383933 4682 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 12:23:36 crc kubenswrapper[4682]: E1210 12:23:36.518317 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 12:23:36 crc kubenswrapper[4682]: E1210 12:23:36.518386 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested" Dec 10 12:23:36 crc kubenswrapper[4682]: E1210 12:23:36.518570 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cloudkitty-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CloudKittyPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:CloudKittyPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:cloudkitty-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:certs,ReadOnly:true,MountPath:/var/lib/openstack/loki-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9j78w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42406,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cloudkitty-db-sync-cdf59_openstack(105b676e-6612-406e-984b-86afbf8ede6c): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:23:36 crc kubenswrapper[4682]: E1210 12:23:36.519827 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cloudkitty-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/cloudkitty-db-sync-cdf59" podUID="105b676e-6612-406e-984b-86afbf8ede6c" Dec 10 12:23:38 crc kubenswrapper[4682]: E1210 12:23:38.505310 4682 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:23:38 crc kubenswrapper[4682]: E1210 12:23:38.505724 4682 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 12:23:38 crc kubenswrapper[4682]: E1210 12:23:38.506028 4682 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n595h86h685h655h94h5d8hffhcfh5d8h77h5b7h5dh685h5f7h656hf6h689h674h668h675h86hfhd8h95hbdh678h679h598hf4h59dhf5h5d9q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l9bz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(58163ec6-c74c-4db2-aad7-c5f598a75856): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 12:23:38 crc kubenswrapper[4682]: E1210 12:23:38.508195 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="58163ec6-c74c-4db2-aad7-c5f598a75856" Dec 10 12:23:41 crc kubenswrapper[4682]: I1210 12:23:41.383112 4682 scope.go:117] "RemoveContainer" containerID="b737b917bf3e0b3fc97bdf0526947ae659a53df6a184e160ed5c0ac8a5f31c48" Dec 10 12:23:41 crc kubenswrapper[4682]: E1210 12:23:41.384004 4682 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-58skk_openshift-machine-config-operator(b504d5b4-49dc-499d-b17c-957131ba411e)\"" pod="openshift-machine-config-operator/machine-config-daemon-58skk" podUID="b504d5b4-49dc-499d-b17c-957131ba411e"